ALSA: xen: Use guard() for mutex locks

Replace the manual mutex lock/unlock pairs with guard() for code
simplification.

This is only a code refactoring; there is no behavior change.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Link: https://patch.msgid.link/20250829151335.7342-9-tiwai@suse.de
This commit is contained in:
Takashi Iwai
2025-08-29 17:13:22 +02:00
parent ab770b4163
commit 3ddf4f9716
2 changed files with 52 additions and 61 deletions

View File

@@ -62,12 +62,12 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req; struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY); req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
req->op.hw_param = *hw_param_req; req->op.hw_param = *hw_param_req;
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
@@ -77,7 +77,6 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
if (ret == 0) if (ret == 0)
*hw_param_resp = evtchnl->u.req.resp.hw_param; *hw_param_resp = evtchnl->u.req.resp.hw_param;
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }
@@ -90,25 +89,24 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req; struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN); req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
req->op.open.pcm_format = format; req->op.open.pcm_format = format;
req->op.open.pcm_channels = channels; req->op.open.pcm_channels = channels;
req->op.open.pcm_rate = rate; req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz; req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz; req->op.open.period_sz = period_sz;
req->op.open.gref_directory = req->op.open.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(shbuf); xen_front_pgdir_shbuf_get_dir_start(shbuf);
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
if (ret == 0) if (ret == 0)
ret = be_stream_wait_io(evtchnl); ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }
@@ -117,18 +115,17 @@ int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
__always_unused struct xensnd_req *req; __always_unused struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE); req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
if (ret == 0) if (ret == 0)
ret = be_stream_wait_io(evtchnl); ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }
@@ -138,20 +135,19 @@ int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req; struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE); req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
req->op.rw.length = count; req->op.rw.length = count;
req->op.rw.offset = pos; req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
if (ret == 0) if (ret == 0)
ret = be_stream_wait_io(evtchnl); ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }
@@ -161,20 +157,19 @@ int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req; struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_READ); req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
req->op.rw.length = count; req->op.rw.length = count;
req->op.rw.offset = pos; req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
if (ret == 0) if (ret == 0)
ret = be_stream_wait_io(evtchnl); ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }
@@ -184,19 +179,18 @@ int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req; struct xensnd_req *req;
int ret; int ret;
mutex_lock(&evtchnl->u.req.req_io_lock); guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock); scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER); req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
req->op.trigger.type = type; req->op.trigger.type = type;
mutex_unlock(&evtchnl->ring_io_lock); }
ret = be_stream_do_io(evtchnl); ret = be_stream_do_io(evtchnl);
if (ret == 0) if (ret == 0)
ret = be_stream_wait_io(evtchnl); ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret; return ret;
} }

View File

@@ -28,7 +28,7 @@ static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED; return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock); guard(mutex)(&channel->ring_io_lock);
again: again:
rp = channel->u.req.ring.sring->rsp_prod; rp = channel->u.req.ring.sring->rsp_prod;
@@ -80,7 +80,6 @@ again:
channel->u.req.ring.sring->rsp_event = i + 1; channel->u.req.ring.sring->rsp_event = i + 1;
} }
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -93,13 +92,13 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED; return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock); guard(mutex)(&channel->ring_io_lock);
prod = page->in_prod; prod = page->in_prod;
/* Ensure we see ring contents up to prod. */ /* Ensure we see ring contents up to prod. */
virt_rmb(); virt_rmb();
if (prod == page->in_cons) if (prod == page->in_cons)
goto out; return IRQ_HANDLED;
/* /*
* Assume that the backend is trusted to always write sane values * Assume that the backend is trusted to always write sane values
@@ -125,8 +124,6 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
/* Ensure ring contents. */ /* Ensure ring contents. */
virt_wmb(); virt_wmb();
out:
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -444,23 +441,23 @@ void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair
else else
state = EVTCHNL_STATE_DISCONNECTED; state = EVTCHNL_STATE_DISCONNECTED;
mutex_lock(&evt_pair->req.ring_io_lock); scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
evt_pair->req.state = state; evt_pair->req.state = state;
mutex_unlock(&evt_pair->req.ring_io_lock); }
mutex_lock(&evt_pair->evt.ring_io_lock); scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
evt_pair->evt.state = state; evt_pair->evt.state = state;
mutex_unlock(&evt_pair->evt.ring_io_lock); }
} }
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair) void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{ {
mutex_lock(&evt_pair->req.ring_io_lock); scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
evt_pair->req.evt_next_id = 0; evt_pair->req.evt_next_id = 0;
mutex_unlock(&evt_pair->req.ring_io_lock); }
mutex_lock(&evt_pair->evt.ring_io_lock); scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
evt_pair->evt.evt_next_id = 0; evt_pair->evt.evt_next_id = 0;
mutex_unlock(&evt_pair->evt.ring_io_lock); }
} }