@@ -1753,6 +1753,9 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret, notify;
 
+	if (tsk->flags & PF_EXITING)
+		return -ESRCH;
+
 	/*
 	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
 	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
@@ -1787,8 +1790,10 @@ static void __io_req_task_cancel(struct io_kiocb *req, int error)
 static void io_req_task_cancel(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	__io_req_task_cancel(req, -ECANCELED);
+	percpu_ref_put(&ctx->refs);
 }
 
 static void __io_req_task_submit(struct io_kiocb *req)
@@ -2010,6 +2015,12 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 
 static inline bool io_run_task_work(void)
 {
+	/*
+	 * Not safe to run on exiting task, and the task_work handling will
+	 * not add work to such a task.
+	 */
+	if (unlikely(current->flags & PF_EXITING))
+		return false;
 	if (current->task_works) {
 		__set_current_state(TASK_RUNNING);
 		task_work_run();
@@ -2283,13 +2294,17 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
 		goto end_req;
 	}
 
-	ret = io_import_iovec(rw, req, &iovec, &iter, false);
-	if (ret < 0)
-		goto end_req;
-	ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
-	if (!ret)
+	if (!req->io) {
+		ret = io_import_iovec(rw, req, &iovec, &iter, false);
+		if (ret < 0)
+			goto end_req;
+		ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
+		if (!ret)
+			return true;
+		kfree(iovec);
+	} else {
 		return true;
-	kfree(iovec);
+	}
 end_req:
 	req_set_fail_links(req);
 	io_req_complete(req, ret);
@@ -3115,6 +3130,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	struct iov_iter __iter, *iter = &__iter;
 	ssize_t io_size, ret, ret2;
 	size_t iov_count;
+	bool no_async;
 
 	if (req->io)
 		iter = &req->io->rw.iter;
@@ -3132,7 +3148,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 
 	/* If the file doesn't support async, just async punt */
-	if (force_nonblock && !io_file_supports_async(req->file, READ))
+	no_async = force_nonblock && !io_file_supports_async(req->file, READ);
+	if (no_async)
 		goto copy_iov;
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
@@ -3176,6 +3193,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		ret = ret2;
 		goto out_free;
 	}
+	if (no_async)
+		return -EAGAIN;
 	/* it's copied and will be cleaned with ->io */
 	iovec = NULL;
 	/* now use our persistent iterator, if we aren't already */
@@ -3508,8 +3527,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	const char __user *fname;
 	int ret;
 
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-		return -EINVAL;
 	if (unlikely(sqe->ioprio || sqe->buf_index))
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
@@ -3536,6 +3553,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	u64 flags, mode;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
 	mode = READ_ONCE(sqe->len);
@@ -3550,6 +3569,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	size_t len;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
 	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
@@ -3767,7 +3788,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -3882,7 +3903,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5399,6 +5420,8 @@ static int io_async_cancel(struct io_kiocb *req)
 static int io_files_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
+	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
+		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
@@ -5449,6 +5472,8 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	if (unlikely(ret))
 		return ret;
 
+	io_prep_async_work(req);
+
 	switch (req->opcode) {
 	case IORING_OP_NOP:
 		break;
@@ -8180,6 +8205,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 		/* cancel this request, or head link requests */
 		io_attempt_cancel(ctx, cancel_req);
 		io_put_req(cancel_req);
+		/* cancellations _may_ trigger task work */
+		io_run_task_work();
 		schedule();
 		finish_wait(&ctx->inflight_wait, &wait);
 	}