Mirror of https://github.com/torvalds/linux.git, synced 2025-12-01 07:26:02 +07:00
Merge tag 'block-6.18-20251009' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - Don't include __GFP_NOWARN for loop worker allocation, as it already
   uses GFP_NOWAIT which has __GFP_NOWARN set already

 - Small series cleaning up the recent bio_iov_iter_get_pages() changes

 - loop fix for leaking the backing file reference, if validation fails

 - Update of a comment pertaining to disk/partition stat locking

* tag 'block-6.18-20251009' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  loop: remove redundant __GFP_NOWARN flag
  block: move bio_iov_iter_get_bdev_pages to block/fops.c
  iomap: open code bio_iov_iter_get_bdev_pages
  block: rename bio_iov_iter_get_pages_aligned to bio_iov_iter_get_pages
  block: remove bio_iov_iter_get_pages
  block: Update a comment of disk statistics
  loop: fix backing file reference leak on validation error
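For context on the first fix: in current mainline GFP_NOWAIT already includes __GFP_NOWARN, so OR-ing the flag in again is a no-op. A minimal sketch of the relationship, assuming the recent gfp_types.h definition:

    /* include/linux/gfp_types.h, recent mainline */
    #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

    /* so these two allocation requests are identical: */
    worker = kzalloc(sizeof(*worker), GFP_NOWAIT | __GFP_NOWARN);
    worker = kzalloc(sizeof(*worker), GFP_NOWAIT);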
block/bio.c

@@ -1316,7 +1316,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
 }
 
 /**
- * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be added
  * @len_align_mask: the mask to align the total size to, 0 for any length
@@ -1336,7 +1336,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
  * MM encounters an error pinning the requested pages, it stops. Error
  * is returned only if 0 pages could be pinned.
  */
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
 		unsigned len_align_mask)
 {
 	int ret = 0;
@@ -1360,7 +1360,6 @@ int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
 		return bio_iov_iter_align_down(bio, iter, len_align_mask);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
 
 static void submit_bio_wait_endio(struct bio *bio)
 {
block/blk-map.c

@@ -283,7 +283,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
 	if (!bio)
 		return -ENOMEM;
-	ret = bio_iov_iter_get_pages(bio, iter);
+	/*
+	 * No alignment requirements on our part to support arbitrary
+	 * passthrough commands.
+	 */
+	ret = bio_iov_iter_get_pages(bio, iter, 0);
 	if (ret)
 		goto out_put;
 	ret = blk_rq_append_bio(rq, bio);
block/fops.c
@@ -43,6 +43,13 @@ static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
 		(bdev_logical_block_size(bdev) - 1);
 }
 
+static inline int blkdev_iov_iter_get_pages(struct bio *bio,
+		struct iov_iter *iter, struct block_device *bdev)
+{
+	return bio_iov_iter_get_pages(bio, iter,
+			bdev_logical_block_size(bdev) - 1);
+}
+
 #define DIO_INLINE_BIO_VECS 4
 
 static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
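The new blkdev_iov_iter_get_pages() helper captures the direct-I/O rule that the total length pulled from the iterator must be a multiple of the logical block size. Since logical block sizes are powers of two, size - 1 works as an alignment mask; a small sketch of the idiom with standalone numbers, not the in-tree code:

    /* Alignment-mask idiom, assuming a power-of-two logical block size. */
    unsigned int lbs  = 512;                 /* bdev_logical_block_size(bdev) */
    unsigned int mask = lbs - 1;             /* 0x1ff */
    size_t total   = 1300;                   /* bytes gathered from the iov_iter */
    size_t aligned = total & ~(size_t)mask;  /* 1024: rounded down to a block */
    /* bio_iov_iter_align_down() trims the bio to such a boundary; a mask of 0,
     * as in the blk-map passthrough path above, accepts any length. */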
@@ -78,7 +85,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 	if (iocb->ki_flags & IOCB_ATOMIC)
 		bio.bi_opf |= REQ_ATOMIC;
 
-	ret = bio_iov_iter_get_bdev_pages(&bio, iter, bdev);
+	ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
 	if (unlikely(ret))
 		goto out;
 	ret = bio.bi_iter.bi_size;
@@ -212,7 +219,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	bio->bi_end_io = blkdev_bio_end_io;
 	bio->bi_ioprio = iocb->ki_ioprio;
 
-	ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+	ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
 	if (unlikely(ret)) {
 		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
@@ -348,7 +355,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 		 */
 		bio_iov_bvec_set(bio, iter);
 	} else {
-		ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
 		if (unlikely(ret))
 			goto out_bio_put;
 	}
drivers/block/loop.c

@@ -551,8 +551,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 		return -EBADF;
 
 	error = loop_check_backing_file(file);
-	if (error)
+	if (error) {
+		fput(file);
 		return error;
+	}
 
 	/* suppress uevents while reconfiguring the device */
 	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
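The hunk above is the backing-file leak fix: loop_change_fd() already holds a reference on the file when validation runs, and the early return previously skipped the matching fput(). Reduced to the reference-counting pattern, a sketch of the flow rather than the full function:

    struct file *file = fget(arg);   /* take a reference to the backing file */
    if (!file)
    	return -EBADF;

    error = loop_check_backing_file(file);
    if (error) {
    	fput(file);              /* every error exit must drop the reference */
    	return error;
    }
    /* on success, the reference is handed over to the loop device */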
@@ -822,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 	if (worker)
 		goto queue_work;
 
-	worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+	worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
 	/*
 	 * In the event we cannot allocate a worker, just queue on the
 	 * rootcg worker and issue the I/O as the rootcg
@@ -993,8 +995,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 		return -EBADF;
 
 	error = loop_check_backing_file(file);
-	if (error)
+	if (error) {
+		fput(file);
 		return error;
+	}
 
 	is_loop = is_loop_device(file);
 
fs/iomap/direct-io.c

@@ -433,7 +433,8 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
 
-	ret = bio_iov_iter_get_bdev_pages(bio, dio->submit.iter, iomap->bdev);
+	ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+			bdev_logical_block_size(iomap->bdev) - 1);
 	if (unlikely(ret)) {
 		/*
 		 * We have to stop part way through an IO. We must fall
include/linux/bio.h

@@ -446,14 +446,9 @@ int submit_bio_wait(struct bio *bio);
 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
 		size_t len, enum req_op op);
 
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
 		unsigned len_align_mask);
 
-static inline int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
-{
-	return bio_iov_iter_get_pages_aligned(bio, iter, 0);
-}
-
 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
 void __bio_release_pages(struct bio *bio, bool mark_dirty);
 extern void bio_set_pages_dirty(struct bio *bio);
include/linux/blkdev.h

@@ -1873,13 +1873,6 @@ static inline int bio_split_rw_at(struct bio *bio,
 	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
 }
 
-static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
-		struct iov_iter *iter, struct block_device *bdev)
-{
-	return bio_iov_iter_get_pages_aligned(bio, iter,
-			bdev_logical_block_size(bdev) - 1);
-}
-
 #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
 
 #endif /* _LINUX_BLKDEV_H */
include/linux/part_stat.h

@@ -17,8 +17,8 @@ struct disk_stats {
 /*
  * Macros to operate on percpu disk statistics:
  *
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
- * be called between disk_stat_lock() and disk_stat_unlock().
+ * part_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between part_stat_lock() and part_stat_unlock().
  *
  * part_stat_read() can be called at any time.
  */
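The corrected comment now names macros that actually exist; the disk_stat_*() family is long gone. A typical usage sketch, modeled on how the block core accounts a completed read (part, nr_sectors, and the locals are illustrative):

    /* Sketch: updating per-cpu disk stats for a completed read. */
    part_stat_lock();                        /* stats are per-cpu; pin this CPU */
    part_stat_inc(part, ios[STAT_READ]);     /* one more read completed */
    part_stat_add(part, sectors[STAT_READ], nr_sectors);
    part_stat_unlock();

    /* part_stat_read() needs no lock: */
    unsigned long reads = part_stat_read(part, ios[STAT_READ]);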