drivers/infiniband/hw/hfi1/verbs_txreq.c
/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"
#include "trace.h"

#define TXREQ_LEN 24

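/*
 * hfi1_put_txreq - free a verbs txreq
 * @tx: the txreq being released
 *
 * Drop the MR reference (if any), clean up the sdma descriptors and
 * return the txreq to the per-device slab cache.  Then, if any QP is
 * blocked on the device txwait list waiting for a txreq, wake the
 * first one.
 */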
void hfi1_put_txreq(struct verbs_txreq *tx)
{
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned int seq;
	struct hfi1_qp_priv *priv;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr)
		rvt_put_mr(tx->mr);

	sdma_txclean(dd_from_dev(dev), &tx->txreq);

	/* Free verbs_txreq and return to slab cache */
	kmem_cache_free(dev->verbs_txreq_cache, tx);

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&dev->txwait)) {
			struct iowait *wait;

			write_seqlock_irqsave(&dev->iowait_lock, flags);
			wait = list_first_entry(&dev->txwait, struct iowait,
						list);
			qp = iowait_to_qp(wait);
			priv = qp->priv;
			list_del_init(&priv->s_iowait.list);
			/* refcount held until actual wake up */
			write_sequnlock_irqrestore(&dev->iowait_lock, flags);
			hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));
}

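/*
 * __get_txreq - allocate a verbs txreq under the iowait lock
 * @dev: the hfi1 ib device
 * @qp: the sending QP
 *
 * Allocate a txreq with the QP s_lock and the device iowait seqlock
 * held.  If the QP may still send but the allocation fails, queue the
 * QP on the device txwait list (holding a QP reference) so that
 * hfi1_put_txreq() can wake it when a txreq is freed, and clear
 * RVT_S_BUSY so the send engine backs off until then.
 */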
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
				struct rvt_qp *qp)
{
	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	write_seqlock(&dev->iowait_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct hfi1_qp_priv *priv;

		tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
		if (tx)
			goto out;
		priv = qp->priv;
		if (list_empty(&priv->s_iowait.list)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->s_iowait.list, &dev->txwait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
			atomic_inc(&qp->refcount);
		}
		qp->s_flags &= ~RVT_S_BUSY;
	}
out:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return tx;
}

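/* kmem_cache constructor: hand out each txreq zeroed */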
static void verbs_txreq_kmem_cache_ctor(void *obj)
{
	struct verbs_txreq *tx = (struct verbs_txreq *)obj;

	memset(tx, 0, sizeof(*tx));
}

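/*
 * verbs_txreq_init - create the per-device txreq slab cache
 *
 * The cache name includes the hfi1 unit number so that each device
 * gets its own, separately named cache.
 */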
int verbs_txreq_init(struct hfi1_ibdev *dev)
{
	char buf[TXREQ_LEN];
	struct hfi1_devdata *dd = dd_from_dev(dev);

	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
	dev->verbs_txreq_cache = kmem_cache_create(buf,
						   sizeof(struct verbs_txreq),
						   0, SLAB_HWCACHE_ALIGN,
						   verbs_txreq_kmem_cache_ctor);
	if (!dev->verbs_txreq_cache)
		return -ENOMEM;
	return 0;
}

void verbs_txreq_exit(struct hfi1_ibdev *dev)
{
	kmem_cache_destroy(dev->verbs_txreq_cache);
	dev->verbs_txreq_cache = NULL;
}