io_uring: hold a file reference for armed poll requests and pin reqs
across task_work execution

Take an extra reference on req->file (stashed in req->poll_file) when a
poll handler is armed, and drop it when the request is finally freed, so
the file cannot go away under an armed poll.  Requests queued for
task_work are pinned with a request reference taken before they are
published on the list, and the reference is dropped by the task_work
runners after the callback completes.

NOTE(review): the put in the task_work runners is guarded by
REQ_F_REFCOUNT captured *before* the callback runs: an unconditional
req_ref_put_and_test() reports "last reference" for non-refcounted
requests and would double-free them, and reading req->flags after the
callback may touch a freed request.  The get is taken before llist_add()
so the runner cannot observe (and put) the request before it is pinned.

--- x/include/linux/io_uring_types.h
+++ y/include/linux/io_uring_types.h
@@ -578,5 +578,7 @@ struct io_kiocb {
 	struct io_kiocb *link;
 	/* custom credentials, valid IFF REQ_F_CREDS is set */
 	const struct cred *creds;
+	/* extra reference on req->file, held while a poll handler is armed */
+	struct file *poll_file;
 	struct io_wq_work work;
 };
--- x/io_uring/poll.c
+++ y/io_uring/poll.c
@@ -714,6 +714,10 @@ int io_arm_poll_handler(struct io_kiocb
 
 	io_kbuf_recycle(req, issue_flags);
 
+	if (!req->poll_file) {
+		get_file(req->file);
+		req->poll_file = req->file;
+	}
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
 	if (ret)
 		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -943,6 +947,10 @@ int io_poll_add(struct io_kiocb *req, un
 	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
 		req->flags |= REQ_F_HASH_LOCKED;
 
+	if (!req->poll_file) {
+		get_file(req->file);
+		req->poll_file = req->file;
+	}
 	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
 	if (ret > 0) {
 		io_req_set_res(req, ipt.result_mask, 0);
--- x/io_uring/io_uring.c
+++ y/io_uring/io_uring.c
@@ -248,8 +248,14 @@ static __cold void io_fallback_req_func(
 	bool locked = false;
 
 	percpu_ref_get(&ctx->refs);
-	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
+	llist_for_each_entry_safe(req, tmp, node, io_task_work.node) {
+		unsigned int flags = req->flags;
+
 		req->io_task_work.func(req, &locked);
+		/* drop the queueing reference from __io_req_task_work_add() */
+		if ((flags & REQ_F_REFCOUNT) && req_ref_put_and_test(req))
+			io_free_req(req);
+	}
 
 	if (locked) {
 		io_submit_flush_completions(ctx);
@@ -976,6 +982,10 @@ static void __io_req_complete_post(struc
 		io_put_kbuf_comp(req);
 		io_dismantle_req(req);
 		io_put_task(req->task, 1);
+		if (req->poll_file) {
+			fput(req->poll_file);
+			req->poll_file = NULL;
+		}
 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 		ctx->locked_free_nr++;
 	}
@@ -1099,6 +1109,10 @@ __cold void io_free_req(struct io_kiocb
 	io_req_put_rsrc(req);
 	io_dismantle_req(req);
 	io_put_task(req->task, 1);
+	if (req->poll_file) {
+		fput(req->poll_file);
+		req->poll_file = NULL;
+	}
 
 	spin_lock(&ctx->completion_lock);
 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
@@ -1169,6 +1183,11 @@ static unsigned int handle_tw_list(struc
+		unsigned int flags = req->flags;
+
 		req->io_task_work.func(req, locked);
 		node = next;
 		count++;
+		/* drop the queueing reference from __io_req_task_work_add() */
+		if ((flags & REQ_F_REFCOUNT) && req_ref_put_and_test(req))
+			io_free_req(req);
 	}
 
 	return count;
@@ -1296,5 +1315,12 @@ void __io_req_task_work_add(struct io_ki
+	/*
+	 * Pin the request before it is published on the list; the matching
+	 * put is in the task_work runners.  Only refcounted requests take
+	 * part in this, and only those may be put by the runners.
+	 */
+	if (req->flags & REQ_F_REFCOUNT)
+		req_ref_get(req);
 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
 		return;
 
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
@@ -2140,6 +2166,8 @@ static int io_init_req(struct io_ring_ct
 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
 	req->cqe.user_data = READ_ONCE(sqe->user_data);
 	req->file = NULL;
+	/* cleared here; set only when a poll handler pins req->file */
+	req->poll_file = NULL;
 	req->rsrc_node = NULL;
 	req->task = current;