Merge tag 'for-6.15/io_uring-20250322' of git://git.kernel.dk/linux

Pull io_uring updates from Jens Axboe:
 "This is the first of the io_uring pull requests for the 6.15 merge
  window, there will be others once the net tree has gone in. This
  contains:

   - Cleanup and unification of cancelation handling across various
     request types.

   - Improvements for bundles, supporting them both for incrementally
     consumed buffers and for non-multishot requests (a userspace sketch
     follows the commit list below).

   - Enable toggling whether iowait is used while waiting on io_uring
     events (see the sketch right after this message). Unfortunately
     this is still tied to CPU frequency boosting on short waits, as the
     scheduler side has not been very receptive to splitting the
     (useless) iowait stat from the cpufreq implied boost.

   - Add support for kbuf nodes, enabling zero-copy support for the ublk
     block driver.

   - Various cleanups for resource node handling.

   - Series greatly cleaning up the legacy provided (non-ring based)
     buffers. For years, we've been pushing the ring provided buffers as
     the way to go, and that is what people have been using. Reduce the
     complexity and code associated with legacy provided buffers.

   - Series cleaning up the compat handling.

   - Series improving and cleaning up the recvmsg/sendmsg iovec and msg
     handling.

   - Series of cleanups for io-wq.

   - Start adding a bunch of selftests. The liburing repository
     generally carries feature and regression tests for everything, but
     at least for ublk initially, we'll try and go the route of having
     it in selftests as well. We'll see how this goes, might decide to
     migrate more tests this way in the future.

   - Various little cleanups and fixes"
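
A hedged sketch of the iowait toggle called out above, assuming the
IORING_ENTER_NO_IOWAIT enter flag and IORING_FEAT_NO_IOWAIT feature bit
that this branch introduces (verify the names against your io_uring.h):

#include <errno.h>
#include <liburing.h>

/* wait for one CQE without this wait being accounted as iowait */
static int wait_cqe_no_iowait(struct io_uring *ring, struct io_uring_cqe **cqe)
{
	int ret;

	/* older kernels reject unknown enter flags, so gate on the feature */
	if (!(ring->features & IORING_FEAT_NO_IOWAIT))
		return io_uring_wait_cqe(ring, cqe);

	/* raw io_uring_enter(2): submit nothing, wait for 1 completion */
	ret = io_uring_enter(ring->ring_fd, 0, 1,
			     IORING_ENTER_GETEVENTS | IORING_ENTER_NO_IOWAIT,
			     NULL);
	if (ret < 0)
		return ret;
	return io_uring_peek_cqe(ring, cqe);
}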

* tag 'for-6.15/io_uring-20250322' of git://git.kernel.dk/linux: (108 commits)
  selftests: ublk: add stripe target
  selftests: ublk: simplify loop io completion
  selftests: ublk: enable zero copy for null target
  selftests: ublk: prepare for supporting stripe target
  selftests: ublk: move common code into common.c
  selftests: ublk: increase max buffer size to 1MB
  selftests: ublk: add single sqe allocator helper
  selftests: ublk: add generic_01 for verifying sequential IO order
  selftests: ublk: fix starting ublk device
  io_uring: enable toggle of iowait usage when waiting on CQEs
  selftests: ublk: fix write cache implementation
  selftests: ublk: add variable for user to not show test result
  selftests: ublk: don't show `modprobe` failure
  selftests: ublk: add one dependency header
  io_uring/kbuf: enable bundles for incrementally consumed buffers
  Revert "io_uring/rsrc: simplify the bvec iter count calculation"
  selftests: ublk: improve test usability
  selftests: ublk: add stress test for covering IO vs. killing ublk server
  selftests: ublk: add one stress test for covering IO vs. removing device
  selftests: ublk: load/unload ublk_drv when preparing & cleaning up tests
  ...
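
As a userspace illustration of the bundle improvements mentioned above, a
bundle recv against a provided buffer ring looks roughly like the
following. IORING_RECVSEND_BUNDLE, IOSQE_BUFFER_SELECT and buffer rings
are existing uapi; pairing bundles with incrementally consumed
(IOU_PBUF_RING_INC) rings is what this pull enables, so treat this as a
sketch rather than a verified 6.15 example:

#include <errno.h>
#include <liburing.h>

/* arm a recv that may complete with several buffers from group bgid */
static int arm_bundle_recv(struct io_uring *ring, int sockfd,
			   unsigned short bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* buffers come from a ring */
	sqe->buf_group = bgid;			/* provided buffer group id */
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;	/* allow multi-buffer CQEs */
	return io_uring_submit(ring);
}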
Linus Torvalds, 2025-03-26 17:56:00 -07:00
58 changed files with 3884 additions and 873 deletions

io_uring/net.c:
@@ -75,7 +75,7 @@ struct io_sr_msg {
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				buf_group;
-	u16				buf_index;
+	bool				retry;
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
@@ -187,17 +187,15 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
+	sr->retry = false;
 	sr->len = 0; /* get from the provided buffer */
 	req->buf_index = sr->buf_group;
 }
 
-#ifdef CONFIG_COMPAT
-static int io_compat_msg_copy_hdr(struct io_kiocb *req,
-				  struct io_async_msghdr *iomsg,
-				  struct compat_msghdr *msg, int ddir)
+static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+			     const struct iovec __user *uiov, unsigned uvec_seg,
+			     int ddir)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct compat_iovec __user *uiov;
 	struct iovec *iov;
 	int ret, nr_segs;
 
 	if (iomsg->free_iov) {
@@ -205,103 +203,108 @@ static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 		nr_segs = iomsg->free_iov_nr;
 		iov = iomsg->free_iov;
 	} else {
-		iov = &iomsg->fast_iov;
 		nr_segs = 1;
+		iov = &iomsg->fast_iov;
 	}
 
+	ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
+			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));
+	if (unlikely(ret < 0))
+		return ret;
+
+	io_net_vec_assign(req, iomsg, iov);
+	return 0;
+}
+
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+				  struct io_async_msghdr *iomsg,
+				  struct compat_msghdr *msg, int ddir,
+				  struct sockaddr __user **save_addr)
+{
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct compat_iovec __user *uiov;
+	int ret;
+
 	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
 		return -EFAULT;
 
+	ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
+	if (ret)
+		return ret;
+
 	uiov = compat_ptr(msg->msg_iov);
 	if (req->flags & REQ_F_BUFFER_SELECT) {
-		compat_ssize_t clen;
-
 		if (msg->msg_iovlen == 0) {
-			sr->len = iov->iov_len = 0;
-			iov->iov_base = NULL;
+			sr->len = 0;
 		} else if (msg->msg_iovlen > 1) {
 			return -EINVAL;
 		} else {
-			if (!access_ok(uiov, sizeof(*uiov)))
-				return -EFAULT;
-			if (__get_user(clen, &uiov->iov_len))
-				return -EFAULT;
-			if (clen < 0)
-				return -EINVAL;
-			sr->len = clen;
+			struct compat_iovec tmp_iov;
+
+			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
+				return -EFAULT;
+			sr->len = tmp_iov.iov_len;
 		}
 
 		return 0;
 	}
 
-	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
-			     nr_segs, &iov, &iomsg->msg.msg_iter, true);
-	if (unlikely(ret < 0))
-		return ret;
-
-	io_net_vec_assign(req, iomsg, iov);
-	return 0;
+	return io_net_import_vec(req, iomsg, (struct iovec __user *)uiov,
+				 msg->msg_iovlen, ddir);
 }
-#endif
 
-static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
-			   struct user_msghdr *msg, int ddir)
+static int io_copy_msghdr_from_user(struct user_msghdr *msg,
+				    struct user_msghdr __user *umsg)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct user_msghdr __user *umsg = sr->umsg;
-	struct iovec *iov;
-	int ret, nr_segs;
-
-	if (iomsg->free_iov) {
-		nr_segs = iomsg->free_iov_nr;
-		iov = iomsg->free_iov;
-	} else {
-		iov = &iomsg->fast_iov;
-		nr_segs = 1;
-	}
-
 	if (!user_access_begin(umsg, sizeof(*umsg)))
 		return -EFAULT;
 
-	ret = -EFAULT;
 	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
 	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
 	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
 	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
 	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
 	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
+	user_access_end();
+	return 0;
+ua_end:
+	user_access_end();
+	return -EFAULT;
+}
+
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+			   struct user_msghdr *msg, int ddir,
+			   struct sockaddr __user **save_addr)
+{
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct user_msghdr __user *umsg = sr->umsg;
+	int ret;
+
+	ret = io_copy_msghdr_from_user(msg, umsg);
+	if (unlikely(ret))
+		return ret;
+
 	msg->msg_flags = 0;
 
+	ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
+	if (ret)
+		return ret;
+
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		if (msg->msg_iovlen == 0) {
-			sr->len = iov->iov_len = 0;
-			iov->iov_base = NULL;
+			sr->len = 0;
 		} else if (msg->msg_iovlen > 1) {
-			ret = -EINVAL;
-			goto ua_end;
+			return -EINVAL;
 		} else {
 			struct iovec __user *uiov = msg->msg_iov;
+			struct iovec tmp_iov;
 
-			/* we only need the length for provided buffers */
-			if (!access_ok(&uiov->iov_len, sizeof(uiov->iov_len)))
-				goto ua_end;
-			unsafe_get_user(iov->iov_len, &uiov->iov_len, ua_end);
-			sr->len = iov->iov_len;
+			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
+				return -EFAULT;
+			sr->len = tmp_iov.iov_len;
 		}
-		ret = 0;
-ua_end:
-		user_access_end();
-		return ret;
+		return 0;
 	}
 
-	user_access_end();
-	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
-			     &iov, &iomsg->msg.msg_iter, false);
-	if (unlikely(ret < 0))
-		return ret;
-
-	io_net_vec_assign(req, iomsg, iov);
-	return 0;
+	return io_net_import_vec(req, iomsg, msg->msg_iov, msg->msg_iovlen, ddir);
 }
 
 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
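
For readers unfamiliar with the pattern used by the new
io_copy_msghdr_from_user() above: a single user_access_begin() section
covers all the field reads, and any fault takes the unsafe_get_user()
error label, which must still end user access before failing. A minimal
standalone analogue (not the kernel code itself; needs linux/uaccess.h):

struct pair {
	int a;
	int b;
};

static int copy_pair_from_user(struct pair *dst, struct pair __user *src)
{
	/* one fault-handling section instead of two get_user() calls */
	if (!user_access_begin(src, sizeof(*src)))
		return -EFAULT;
	unsafe_get_user(dst->a, &src->a, ua_end);
	unsafe_get_user(dst->b, &src->b, ua_end);
	user_access_end();
	return 0;
ua_end:
	user_access_end();
	return -EFAULT;
}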
@@ -314,26 +317,16 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 	iomsg->msg.msg_name = &iomsg->addr;
 	iomsg->msg.msg_iter.nr_segs = 0;
 
-#ifdef CONFIG_COMPAT
-	if (unlikely(req->ctx->compat)) {
+	if (io_is_compat(req->ctx)) {
 		struct compat_msghdr cmsg;
 
-		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
-		if (unlikely(ret))
-			return ret;
-
-		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE,
+					     NULL);
 		sr->msg_control = iomsg->msg.msg_control_user;
 		return ret;
 	}
-#endif
 
-	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
-	if (unlikely(ret))
-		return ret;
-
-	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
+	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL);
 	/* save msg_control as sys_sendmsg() overwrites it */
 	sr->msg_control = iomsg->msg.msg_control_user;
 	return ret;
@@ -387,14 +380,10 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr *kmsg = req->async_data;
-	int ret;
 
 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 
-	ret = io_sendmsg_copy_hdr(req, kmsg);
-	if (!ret)
-		req->flags |= REQ_F_NEED_CLEANUP;
-	return ret;
+	return io_sendmsg_copy_hdr(req, kmsg);
 }
 
 #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
@@ -404,6 +393,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
+	sr->retry = false;
 
 	if (req->opcode != IORING_OP_SEND) {
 		if (sqe->addr2 || sqe->file_index)
@@ -427,10 +417,9 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		req->buf_list = NULL;
 	}
 
-#ifdef CONFIG_COMPAT
-	if (req->ctx->compat)
+	if (io_is_compat(req->ctx))
 		sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+
 	if (unlikely(!io_msg_alloc_async(req)))
 		return -ENOMEM;
 	if (req->opcode != IORING_OP_SENDMSG)
@@ -714,31 +703,20 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 	iomsg->msg.msg_name = &iomsg->addr;
 	iomsg->msg.msg_iter.nr_segs = 0;
 
-#ifdef CONFIG_COMPAT
-	if (unlikely(req->ctx->compat)) {
+	if (io_is_compat(req->ctx)) {
 		struct compat_msghdr cmsg;
 
-		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
-		if (unlikely(ret))
-			return ret;
-
-		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
-		if (unlikely(ret))
-			return ret;
-
-		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
-					     cmsg.msg_controllen);
+		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST,
+					     &iomsg->uaddr);
+		memset(&msg, 0, sizeof(msg));
+		msg.msg_namelen = cmsg.msg_namelen;
+		msg.msg_controllen = cmsg.msg_controllen;
+	} else {
+		ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
 	}
-#endif
 
-	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
 	if (unlikely(ret))
 		return ret;
-
-	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
-	if (unlikely(ret))
-		return ret;
 	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
 				     msg.msg_controllen);
 }
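
These conversions can drop the #ifdef CONFIG_COMPAT blocks because
io_is_compat() folds to a compile-time false when COMPAT is disabled,
letting the compiler discard the compat branches and the helpers they
reference. Roughly (a sketch; the real helper lives in io_uring's
internal headers):

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
	return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}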
@@ -772,10 +750,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 		return 0;
 	}
 
-	ret = io_recvmsg_copy_hdr(req, kmsg);
-	if (!ret)
-		req->flags |= REQ_F_NEED_CLEANUP;
-	return ret;
+	return io_recvmsg_copy_hdr(req, kmsg);
 }
 
 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
@@ -786,6 +761,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
+	sr->retry = false;
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -826,14 +802,16 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return -EINVAL;
 	}
 
-#ifdef CONFIG_COMPAT
-	if (req->ctx->compat)
+	if (io_is_compat(req->ctx))
 		sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+
 	sr->nr_multishot_loops = 0;
 	return io_recvmsg_prep_setup(req);
 }
 
+/* bits to clear in old and inherit in new cflags on bundle retry */
+#define CQE_F_MASK (IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
+
 /*
  * Finishes io_recv and io_recvmsg.
  *
@@ -853,9 +831,19 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
 				      issue_flags);
+		if (sr->retry)
+			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
 			goto finish;
+		/* if more is available, retry and append to this one */
+		if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) {
+			req->cqe.flags = cflags & ~CQE_F_MASK;
+			sr->len = kmsg->msg.msg_inq;
+			sr->done_io += *ret;
+			sr->retry = true;
+			return false;
+		}
 	} else {
 		cflags |= io_put_kbuf(req, *ret, issue_flags);
 	}
@@ -1234,6 +1222,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_kiocb *notif;
 
 	zc->done_io = 0;
+	zc->retry = false;
 	req->flags |= REQ_F_POLL_NO_LAZY;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
@@ -1272,14 +1261,13 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	zc->len = READ_ONCE(sqe->len);
 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
-	zc->buf_index = READ_ONCE(sqe->buf_index);
+	req->buf_index = READ_ONCE(sqe->buf_index);
 	if (zc->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
 
-#ifdef CONFIG_COMPAT
-	if (req->ctx->compat)
+	if (io_is_compat(req->ctx))
 		zc->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+
 	if (unlikely(!io_msg_alloc_async(req)))
 		return -ENOMEM;
 	if (req->opcode != IORING_OP_SENDMSG_ZC)
@@ -1344,24 +1332,10 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
-		struct io_ring_ctx *ctx = req->ctx;
-		struct io_rsrc_node *node;
-
-		ret = -EFAULT;
-		io_ring_submit_lock(ctx, issue_flags);
-		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
-		if (node) {
-			io_req_assign_buf_node(sr->notif, node);
-			ret = 0;
-		}
-		io_ring_submit_unlock(ctx, issue_flags);
-
-		if (unlikely(ret))
-			return ret;
-
-		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
-				      node->buf, (u64)(uintptr_t)sr->buf,
-				      sr->len);
+		sr->notif->buf_index = req->buf_index;
+		ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+					(u64)(uintptr_t)sr->buf, sr->len,
+					ITER_SOURCE, issue_flags);
 		if (unlikely(ret))
 			return ret;
 		kmsg->msg.sg_from_iter = io_sg_from_iter;
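
The import side now goes through io_import_reg_buf() with the buffer
index carried in the notification request, rather than an open-coded
rsrc node lookup under the ring lock. Userspace is unaffected; a
zero-copy send from a registered buffer is still issued along these
lines (liburing sketch, assuming a buffer table was registered earlier
with io_uring_register_buffers()):

#include <errno.h>
#include <liburing.h>

/* queue a zero-copy send from the registered buffer at buf_index */
static int queue_send_zc_fixed(struct io_uring *ring, int sockfd,
			       const void *buf, size_t len,
			       unsigned buf_index)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_send_zc_fixed(sqe, sockfd, buf, len, 0, 0, buf_index);
	return io_uring_submit(ring);
}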
@@ -1600,7 +1574,6 @@ retry:
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
-		req_set_fail(req);
 	} else if (!fixed) {
 		fd_install(fd, file);
 		ret = fd;
@@ -1613,14 +1586,8 @@ retry:
 	if (!arg.is_empty)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
-		io_req_set_res(req, ret, cflags);
-		return IOU_OK;
-	}
-
-	if (ret < 0)
-		return ret;
-	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
+	if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
+	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
 			goto retry;
 		if (issue_flags & IO_URING_F_MULTISHOT)
@@ -1629,6 +1596,10 @@ retry:
 	}
 
 	io_req_set_res(req, ret, cflags);
+	if (ret < 0)
+		req_set_fail(req);
+	if (!(issue_flags & IO_URING_F_MULTISHOT))
+		return IOU_OK;
 	return IOU_STOP_MULTISHOT;
 }
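
With the rewritten tail above, a multishot accept posts one CQE with
IORING_CQE_F_MORE per accepted connection for as long as it stays armed,
and a final CQE carries the error and fails the request. Arming it from
userspace is unchanged (liburing sketch):

#include <errno.h>
#include <liburing.h>

/* one SQE, one CQE per accepted connection until error or cancelation */
static int arm_multishot_accept(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	return io_uring_submit(ring);
}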