Currently, if a user enqueues a work item using schedule_delayed_work(), the workqueue used is "system_wq" (a per-CPU workqueue), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again uses WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, allowing the scheduler to place worker threads where they're needed and reducing noise when CPUs are isolated.

This patch adds the new WQ_PERCPU flag to all fs subsystem users to explicitly request per-CPU behavior. Both flags coexist for one release cycle to allow callers to transition their calls. Once migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn't explicitly specify WQ_UNBOUND must now use WQ_PERCPU. All existing users have been updated accordingly.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-4-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
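For illustration only, a minimal sketch of the transition for a generic alloc_workqueue() caller; the "example_wq" name and example_init() function are hypothetical and not part of the file below:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/*
	 * Previously, alloc_workqueue("example_wq", 0, 0) relied on the
	 * implicit per-CPU default. With WQ_PERCPU, per-CPU behavior is
	 * requested explicitly; callers that want unbound queues keep
	 * passing WQ_UNBOUND until unbound becomes the implicit default.
	 */
	example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	if (!example_wq)
		return -ENOMEM;

	return 0;
}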
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <linux/module.h>

#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "user.h"
#include "memory.h"
#include "config.h"
#include "midcomms.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dlm.h>

struct workqueue_struct *dlm_wq;

static int __init init_dlm(void)
{
	int error;

	error = dlm_memory_init();
	if (error)
		goto out;

	dlm_midcomms_init();

	error = dlm_lockspace_init();
	if (error)
		goto out_mem;

	error = dlm_config_init();
	if (error)
		goto out_lockspace;

	dlm_register_debugfs();

	error = dlm_user_init();
	if (error)
		goto out_debug;

	error = dlm_plock_init();
	if (error)
		goto out_user;

	dlm_wq = alloc_workqueue("dlm_wq", WQ_PERCPU, 0);
	if (!dlm_wq) {
		error = -ENOMEM;
		goto out_plock;
	}

	printk("DLM installed\n");

	return 0;

out_plock:
	dlm_plock_exit();
out_user:
	dlm_user_exit();
out_debug:
	dlm_unregister_debugfs();
	dlm_config_exit();
out_lockspace:
	dlm_lockspace_exit();
out_mem:
	dlm_midcomms_exit();
	dlm_memory_exit();
out:
	return error;
}

static void __exit exit_dlm(void)
{
	/* be sure every pending work e.g. freeing is done */
	destroy_workqueue(dlm_wq);
	dlm_plock_exit();
	dlm_user_exit();
	dlm_config_exit();
	dlm_lockspace_exit();
	dlm_midcomms_exit();
	dlm_unregister_debugfs();
	dlm_memory_exit();
}

module_init(init_dlm);
module_exit(exit_dlm);

MODULE_DESCRIPTION("Distributed Lock Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL_GPL(dlm_new_lockspace);
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
EXPORT_SYMBOL_GPL(dlm_lock);
EXPORT_SYMBOL_GPL(dlm_unlock);