ALSA: xen: Use guard() for mutex locks

Replace the manual mutex lock/unlock pairs with guard() for code
simplification.

Only code refactoring; no behavior change is intended.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Link: https://patch.msgid.link/20250829151335.7342-9-tiwai@suse.de
This commit is contained in:
Takashi Iwai
2025-08-29 17:13:22 +02:00
parent ab770b4163
commit 3ddf4f9716
2 changed files with 52 additions and 61 deletions

View File

@@ -62,12 +62,12 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
req->op.hw_param = *hw_param_req;
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
req->op.hw_param = *hw_param_req;
}
ret = be_stream_do_io(evtchnl);
@@ -77,7 +77,6 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
if (ret == 0)
*hw_param_resp = evtchnl->u.req.resp.hw_param;
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
@@ -90,25 +89,24 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
req->op.open.pcm_format = format;
req->op.open.pcm_channels = channels;
req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz;
req->op.open.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(shbuf);
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
req->op.open.pcm_format = format;
req->op.open.pcm_channels = channels;
req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz;
req->op.open.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(shbuf);
}
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
@@ -117,18 +115,17 @@ int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
__always_unused struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
}
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
@@ -138,20 +135,19 @@ int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
req->op.rw.length = count;
req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
req->op.rw.length = count;
req->op.rw.offset = pos;
}
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
@@ -161,20 +157,19 @@ int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
req->op.rw.length = count;
req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
req->op.rw.length = count;
req->op.rw.offset = pos;
}
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
@@ -184,19 +179,18 @@ int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
guard(mutex)(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
req->op.trigger.type = type;
mutex_unlock(&evtchnl->ring_io_lock);
scoped_guard(mutex, &evtchnl->ring_io_lock) {
req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
req->op.trigger.type = type;
}
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}

View File

@@ -28,7 +28,7 @@ static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock);
guard(mutex)(&channel->ring_io_lock);
again:
rp = channel->u.req.ring.sring->rsp_prod;
@@ -80,7 +80,6 @@ again:
channel->u.req.ring.sring->rsp_event = i + 1;
}
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED;
}
@@ -93,13 +92,13 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock);
guard(mutex)(&channel->ring_io_lock);
prod = page->in_prod;
/* Ensure we see ring contents up to prod. */
virt_rmb();
if (prod == page->in_cons)
goto out;
return IRQ_HANDLED;
/*
* Assume that the backend is trusted to always write sane values
@@ -125,8 +124,6 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
/* Ensure ring contents. */
virt_wmb();
out:
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED;
}
@@ -444,23 +441,23 @@ void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair
else
state = EVTCHNL_STATE_DISCONNECTED;
mutex_lock(&evt_pair->req.ring_io_lock);
evt_pair->req.state = state;
mutex_unlock(&evt_pair->req.ring_io_lock);
scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
evt_pair->req.state = state;
}
mutex_lock(&evt_pair->evt.ring_io_lock);
evt_pair->evt.state = state;
mutex_unlock(&evt_pair->evt.ring_io_lock);
scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
evt_pair->evt.state = state;
}
}
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
mutex_lock(&evt_pair->req.ring_io_lock);
evt_pair->req.evt_next_id = 0;
mutex_unlock(&evt_pair->req.ring_io_lock);
scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
evt_pair->req.evt_next_id = 0;
}
mutex_lock(&evt_pair->evt.ring_io_lock);
evt_pair->evt.evt_next_id = 0;
mutex_unlock(&evt_pair->evt.ring_io_lock);
scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
evt_pair->evt.evt_next_id = 0;
}
}