xprtrdma: Saving IRQs no longer needed for rb_lock
Author:     Chuck Lever <chuck.lever@oracle.com>
AuthorDate: Sat, 24 Oct 2015 21:27:27 +0000 (17:27 -0400)
Commit:     Anna Schumaker <Anna.Schumaker@Netapp.com>
CommitDate: Mon, 2 Nov 2015 18:45:15 +0000 (13:45 -0500)
Now that RPC replies are processed in a workqueue, there's no need
to disable IRQs when managing send and receive buffers. This saves
noticeable overhead per RPC.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
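
[Editor's note: a minimal sketch, not part of the patch, of the locking
rule the commit message relies on. If a lock can be taken from hard-IRQ
context, every other acquirer must mask local interrupts first, or an
interrupt arriving while the lock is held deadlocks on the same CPU.
Once no acquirer runs in IRQ context, the plain lock suffices.]

    #include <linux/spinlock.h>

    /* Editorial sketch, not xprtrdma source. */
    static DEFINE_SPINLOCK(rb_lock);

    static void before_workqueue_conversion(void)
    {
            unsigned long flags;

            /* Reply handling could run in the completion (IRQ) path,
             * so interrupts had to be masked around the critical
             * section to prevent self-deadlock. */
            spin_lock_irqsave(&rb_lock, flags);
            /* ... manipulate rb_send_bufs / rb_recv_bufs ... */
            spin_unlock_irqrestore(&rb_lock, flags);
    }

    static void after_workqueue_conversion(void)
    {
            /* All acquirers now run in process context, so the
             * cheaper plain lock is sufficient. */
            spin_lock(&rb_lock);
            /* ... */
            spin_unlock(&rb_lock);
    }
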
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3dd5a7c951c5b66158f60ef6e6ac798a58b1263e..baa0523476a48e0bc50e7f3d469f81a73bab7243 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1063,24 +1063,23 @@ struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
        struct rpcrdma_req *req;
-       unsigned long flags;
 
-       spin_lock_irqsave(&buffers->rb_lock, flags);
+       spin_lock(&buffers->rb_lock);
        if (list_empty(&buffers->rb_send_bufs))
                goto out_reqbuf;
        req = rpcrdma_buffer_get_req_locked(buffers);
        if (list_empty(&buffers->rb_recv_bufs))
                goto out_repbuf;
        req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
        return req;
 
 out_reqbuf:
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
        pr_warn("RPC:       %s: out of request buffers\n", __func__);
        return NULL;
 out_repbuf:
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
        pr_warn("RPC:       %s: out of reply buffers\n", __func__);
        req->rl_reply = NULL;
        return req;
@@ -1095,16 +1094,15 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        struct rpcrdma_rep *rep = req->rl_reply;
-       unsigned long flags;
 
        req->rl_niovs = 0;
        req->rl_reply = NULL;
 
-       spin_lock_irqsave(&buffers->rb_lock, flags);
+       spin_lock(&buffers->rb_lock);
        list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
        if (rep)
                list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1115,12 +1113,11 @@ void
 rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 {
        struct rpcrdma_buffer *buffers = req->rl_buffer;
-       unsigned long flags;
 
-       spin_lock_irqsave(&buffers->rb_lock, flags);
+       spin_lock(&buffers->rb_lock);
        if (!list_empty(&buffers->rb_recv_bufs))
                req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1131,11 +1128,10 @@ void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
-       unsigned long flags;
 
-       spin_lock_irqsave(&buffers->rb_lock, flags);
+       spin_lock(&buffers->rb_lock);
        list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-       spin_unlock_irqrestore(&buffers->rb_lock, flags);
+       spin_unlock(&buffers->rb_lock);
 }
 
 /*
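
[Editor's note: for context, a sketch of the structural change that makes
the patch above safe. The names reply_work, rpcrdma_reply_handler_work,
and on_receive_completion are illustrative, not the actual xprtrdma
symbols. Work items run in process context, so once the completion
handler merely queues work instead of processing the reply inline,
rb_lock is never taken from hard-IRQ context.]

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(rb_lock);

    /* Runs from a workqueue, i.e. process context. */
    static void rpcrdma_reply_handler_work(struct work_struct *work)
    {
            spin_lock(&rb_lock);    /* plain lock is safe here */
            /* ... return the reply buffer to rb_recv_bufs ... */
            spin_unlock(&rb_lock);
    }
    static DECLARE_WORK(reply_work, rpcrdma_reply_handler_work);

    /* Interrupt context: defer the work, never take rb_lock here. */
    static void on_receive_completion(void)
    {
            schedule_work(&reply_work);
    }
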