mirror of https://github.com/torvalds/linux.git
fs: replace use of system_unbound_wq with system_dfl_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the workqueue used is system_wq (the per-CPU wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again makes use of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce locality constraints for random work whenever that is not required. Add system_dfl_wq to encourage its use when unbound work should be used. The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-2-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
committed by Christian Brauner
parent 8f5ae30d69
commit 7a4f92d39f
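
For context, a minimal sketch of the usage pattern this change moves toward (hypothetical module code, not part of this commit; the example_* names are invented): work with no locality requirement is queued on system_dfl_wq via queue_work()/queue_delayed_work() rather than implicitly landing on the per-CPU system_wq through schedule_work()/schedule_delayed_work().

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical illustration only; not part of this commit. */
#include <linux/module.h>
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	pr_info("unbound work executed\n");
}

static DECLARE_WORK(example_work, example_work_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_work_fn);

static int __init example_init(void)
{
	/*
	 * schedule_work(&example_work) would implicitly use system_wq
	 * (per-CPU). For work without locality requirements, queue it on
	 * the new default unbound workqueue introduced by this series:
	 */
	if (!queue_work(system_dfl_wq, &example_work))
		pr_warn("example_work was already pending\n");

	/* Same idea for delayed work (run after ~1 second). */
	queue_delayed_work(system_dfl_wq, &example_dwork, HZ);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_work);
	cancel_delayed_work_sync(&example_dwork);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
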
@@ -42,7 +42,7 @@ static void afs_volume_init_callback(struct afs_volume *volume)
 	list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
 		if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
 			afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
-			queue_work(system_unbound_wq, &vnode->cb_work);
+			queue_work(system_dfl_wq, &vnode->cb_work);
 		}
 	}
@@ -90,7 +90,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
 		if (reason != afs_cb_break_for_deleted &&
 		    vnode->status.type == AFS_FTYPE_FILE &&
 		    atomic_read(&vnode->cb_nr_mmap))
-			queue_work(system_unbound_wq, &vnode->cb_work);
+			queue_work(system_dfl_wq, &vnode->cb_work);

 		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
 	} else {
@@ -172,7 +172,7 @@ static void afs_issue_write_worker(struct work_struct *work)
 void afs_issue_write(struct netfs_io_subrequest *subreq)
 {
 	subreq->work.func = afs_issue_write_worker;
-	if (!queue_work(system_unbound_wq, &subreq->work))
+	if (!queue_work(system_dfl_wq, &subreq->work))
 		WARN_ON_ONCE(1);
 }
@@ -827,7 +827,7 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_

 	if (bch2_btree_write_buffer_should_flush(c) &&
 	    __enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer) &&
-	    !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
+	    !queue_work(system_dfl_wq, &c->btree_write_buffer.flush_work))
 		enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer);

 	if (dst->wb == &wb->flushing)
@@ -684,7 +684,7 @@ static void bch2_rbio_error(struct bch_read_bio *rbio,

 	if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) {
 		bch2_rbio_punt(rbio, bch2_rbio_retry,
-			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+			       RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	} else {
 		rbio = bch2_rbio_free(rbio);
@@ -921,10 +921,10 @@ csum_err:
 	bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR);
 	goto out;
 decompression_err:
-	bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+	bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	goto out;
 decrypt_err:
-	bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+	bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	goto out;
 }
@@ -963,7 +963,7 @@ static void bch2_read_endio(struct bio *bio)
 	    rbio->promote ||
 	    crc_is_compressed(rbio->pick.crc) ||
 	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
-		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
+		context = RBIO_CONTEXT_UNBOUND, wq = system_dfl_wq;
 	else if (rbio->pick.crc.csum_type)
 		context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
@@ -1362,7 +1362,7 @@ int bch2_journal_read(struct bch_fs *c,
 					  BCH_DEV_READ_REF_journal_read))
 			closure_call(&ca->journal.read,
 				     bch2_journal_read_device,
-				     system_unbound_wq,
+				     system_dfl_wq,
 				     &jlist.cl);
 		else
 			degraded = true;
@@ -2031,7 +2031,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
 	btrfs_reclaim_sweep(fs_info);
 	spin_lock(&fs_info->unused_bgs_lock);
 	if (!list_empty(&fs_info->reclaim_bgs))
-		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
+		queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work);
 	spin_unlock(&fs_info->unused_bgs_lock);
 }
@@ -1372,7 +1372,7 @@ void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
 	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
 		return;

-	queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
+	queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
 }

 void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
@@ -1830,7 +1830,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
 						  space_info->flags,
 						  orig_bytes, flush,
 						  "enospc");
-			queue_work(system_unbound_wq, async_work);
+			queue_work(system_dfl_wq, async_work);
 		}
 	} else {
 		list_add_tail(&ticket.list,
@@ -1847,7 +1847,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
 			   need_preemptive_reclaim(fs_info, space_info)) {
 			trace_btrfs_trigger_flush(fs_info, space_info->flags,
 						  orig_bytes, flush, "preempt");
-			queue_work(system_unbound_wq,
+			queue_work(system_dfl_wq,
 				   &fs_info->preempt_reclaim_work);
 		}
 	}
@@ -2488,7 +2488,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 	refcount_inc(&eb->refs);
 	bg->last_eb = eb;
 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
-	queue_work(system_unbound_wq, &bg->zone_finish_work);
+	queue_work(system_dfl_wq, &bg->zone_finish_work);
 }

 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
@@ -635,7 +635,7 @@ static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)

 	/*
 	 * Usermode helpers are childen of either
-	 * system_unbound_wq or of kthreadd. So we know that
+	 * system_dfl_wq or of kthreadd. So we know that
 	 * we're starting off with a clean file descriptor
 	 * table. So we should always be able to use
 	 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
@@ -3995,7 +3995,7 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
 		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
 		spin_unlock(&sbi->s_md_lock);
 		if (wake)
-			queue_work(system_unbound_wq, &sbi->s_discard_work);
+			queue_work(system_dfl_wq, &sbi->s_discard_work);
 	} else {
 		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
 			kmem_cache_free(ext4_free_data_cachep, entry);
@@ -321,7 +321,7 @@ void netfs_wake_collector(struct netfs_io_request *rreq)
 {
 	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
 	    !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
-		queue_work(system_unbound_wq, &rreq->work);
+		queue_work(system_dfl_wq, &rreq->work);
 	} else {
 		trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
 		wake_up(&rreq->waitq);
@@ -163,7 +163,7 @@ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
 		dead = __refcount_dec_and_test(&rreq->ref, &r);
 		trace_netfs_rreq_ref(debug_id, r - 1, what);
 		if (dead)
-			WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
+			WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
 	}
 }
@@ -113,7 +113,7 @@ static void
 nfsd_file_schedule_laundrette(void)
 {
 	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
-		queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
+		queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette,
 				   NFSD_LAUNDRETTE_DELAY);
 }
@@ -428,7 +428,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 		conn->destroy_next = connector_destroy_list;
 		connector_destroy_list = conn;
 		spin_unlock(&destroy_lock);
-		queue_work(system_unbound_wq, &connector_reaper_work);
+		queue_work(system_dfl_wq, &connector_reaper_work);
 	}
 	/*
 	 * Note that we didn't update flags telling whether inode cares about
@@ -439,7 +439,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
-	queue_delayed_work(system_unbound_wq, &reaper_work,
+	queue_delayed_work(system_dfl_wq, &reaper_work,
 			   FSNOTIFY_REAPER_DELAY);
 }
 EXPORT_SYMBOL_GPL(fsnotify_put_mark);
@@ -881,7 +881,7 @@ void dqput(struct dquot *dquot)
 	put_releasing_dquots(dquot);
 	atomic_dec(&dquot->dq_count);
 	spin_unlock(&dq_list_lock);
-	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+	queue_delayed_work(system_dfl_wq, &quota_release_work, 1);
 }
 EXPORT_SYMBOL(dqput);