io_uring/zcrx: remove sync refill uapi

There is a better way to handle the problem IORING_REGISTER_ZCRX_REFILL
solves. The uapi can also be slightly adjusted to accommodate future
extensions. Remove the feature for now, it'll be reworked for the next
release.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov
2025-11-03 13:53:13 +00:00
committed by Jens Axboe
parent 6f1cbf6d6f
commit 819630bd6f
4 changed files with 0 additions and 90 deletions

View File

@@ -689,9 +689,6 @@ enum io_uring_register_op {
/* query various aspects of io_uring, see linux/io_uring/query.h */
IORING_REGISTER_QUERY = 35,
/* return zcrx buffers back into circulation */
IORING_REGISTER_ZCRX_REFILL = 36,
/* this goes last */
IORING_REGISTER_LAST,
@@ -1073,15 +1070,6 @@ struct io_uring_zcrx_ifq_reg {
__u64 __resv[3];
};
/*
 * Argument block for IORING_REGISTER_ZCRX_REFILL: lets userspace hand a
 * batch of rqes back to a zcrx interface queue in one syscall.
 * Reserved fields must be zero (checked by the kernel side).
 */
struct io_uring_zcrx_sync_refill {
/* ID of the target zcrx interface queue (key into ctx->zcrx_ctxs) */
__u32 zcrx_id;
/* the number of entries to return */
__u32 nr_entries;
/* pointer to an array of struct io_uring_zcrx_rqe */
__u64 rqes;
/* reserved for future extensions; must be zeroed by userspace */
__u64 __resv[2];
};
#ifdef __cplusplus
}
#endif

View File

@@ -827,9 +827,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
case IORING_REGISTER_QUERY:
ret = io_query(ctx, arg, nr_args);
break;
case IORING_REGISTER_ZCRX_REFILL:
ret = io_zcrx_return_bufs(ctx, arg, nr_args);
break;
default:
ret = -EINVAL;
break;

View File

@@ -928,74 +928,6 @@ static const struct memory_provider_ops io_uring_pp_zc_ops = {
.uninstall = io_pp_uninstall,
};
/* Upper bound on entries accepted per IORING_REGISTER_ZCRX_REFILL call. */
#define IO_ZCRX_MAX_SYS_REFILL_BUFS (1 << 16)
/* Entries copied from userspace per iteration (bounds on-stack rqe array). */
#define IO_ZCRX_SYS_REFILL_BATCH 32
/*
 * Release @nr user references described by @rqes back to @ifq.
 * For each entry: parse it into a net_iov, drop the user reference under
 * rq_lock, then drop the page_pool reference and, if that was the last
 * one, return the niov to circulation. Unparseable entries are skipped.
 */
static void io_return_buffers(struct io_zcrx_ifq *ifq,
struct io_uring_zcrx_rqe *rqes, unsigned nr)
{
int i;
for (i = 0; i < nr; i++) {
struct net_iov *niov;
netmem_ref netmem;
/* malformed/out-of-range rqe: ignore this entry */
if (!io_parse_rqe(&rqes[i], ifq, &niov))
continue;
/*
 * NOTE(review): `continue` inside scoped_guard() exits only the
 * hidden guard loop, i.e. it releases rq_lock and falls through
 * to the unref below rather than advancing the outer for loop —
 * confirm this is the intended flow when the uref put fails.
 */
scoped_guard(spinlock_bh, &ifq->rq_lock) {
if (!io_zcrx_put_niov_uref(niov))
continue;
}
netmem = net_iov_to_netmem(niov);
/* someone else still holds a pool reference: nothing more to do */
if (!page_pool_unref_and_test(netmem))
continue;
io_zcrx_return_niov(niov);
}
}
/*
 * Handle IORING_REGISTER_ZCRX_REFILL: return a userspace-supplied array of
 * rqes back into circulation for the zcrx queue named by zr.zcrx_id.
 *
 * @ctx:    ring context owning the zcrx queues
 * @arg:    userspace pointer to a struct io_uring_zcrx_sync_refill
 * @nr_arg: must be 0 for this opcode
 *
 * Returns the number of entries processed (possibly short if a fatal signal
 * is pending or a later copy faults), or a negative error code:
 * -EINVAL on bad argument/unknown queue, -EFAULT if the very first copy
 * from userspace fails.
 */
int io_zcrx_return_bufs(struct io_ring_ctx *ctx,
			void __user *arg, unsigned nr_arg)
{
	struct io_uring_zcrx_rqe rqes[IO_ZCRX_SYS_REFILL_BATCH];
	struct io_uring_zcrx_rqe __user *user_rqes;
	struct io_uring_zcrx_sync_refill zr;
	struct io_zcrx_ifq *ifq;
	unsigned done = 0, total;

	/* this opcode carries no extra argument count */
	if (nr_arg)
		return -EINVAL;
	if (copy_from_user(&zr, arg, sizeof(zr)))
		return -EFAULT;
	if (!zr.nr_entries || zr.nr_entries > IO_ZCRX_MAX_SYS_REFILL_BUFS)
		return -EINVAL;
	/* reserved fields are claimed for future extensions */
	if (!mem_is_zero(&zr.__resv, sizeof(zr.__resv)))
		return -EINVAL;

	ifq = xa_load(&ctx->zcrx_ctxs, zr.zcrx_id);
	if (!ifq)
		return -EINVAL;

	total = zr.nr_entries;
	user_rqes = u64_to_user_ptr(zr.rqes);

	/* copy and release in bounded batches to cap stack usage */
	while (done < total) {
		unsigned chunk = min(total - done, IO_ZCRX_SYS_REFILL_BATCH);
		size_t bytes = chunk * sizeof(rqes[0]);

		/* report partial progress if a later copy faults */
		if (copy_from_user(rqes, user_rqes + done, bytes))
			return done ? done : -EFAULT;
		io_return_buffers(ifq, rqes, chunk);
		done += chunk;

		/* bail out early, but report what was already returned */
		if (fatal_signal_pending(current))
			return done;
		cond_resched();
	}
	return total;
}
static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
struct io_zcrx_ifq *ifq, int off, int len)
{

View File

@@ -63,8 +63,6 @@ struct io_zcrx_ifq {
};
#if defined(CONFIG_IO_URING_ZCRX)
int io_zcrx_return_bufs(struct io_ring_ctx *ctx,
void __user *arg, unsigned nr_arg);
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
struct io_uring_zcrx_ifq_reg __user *arg);
void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx);
@@ -97,11 +95,6 @@ static inline struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ct
{
return NULL;
}
static inline int io_zcrx_return_bufs(struct io_ring_ctx *ctx,
void __user *arg, unsigned nr_arg)
{
return -EOPNOTSUPP;
}
#endif
int io_recvzc(struct io_kiocb *req, unsigned int issue_flags);