/*
 * Merge remote-tracking branch 'mailbox/mailbox-for-next'
 * [deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / dev.c
 */
1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mlx5/driver.h>
34 #include "mlx5_core.h"
35
/* All registered mlx5 interfaces (protocol drivers), linked via intf->list */
static LIST_HEAD(intf_list);
/* All registered mlx5 core devices, linked via mlx5_priv.dev_list */
static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
40
/*
 * One instantiation of an interface on a core device; lives on the
 * device's mlx5_priv.ctx_list.
 */
struct mlx5_device_context {
	struct list_head list;		/* node on mlx5_priv.ctx_list */
	struct mlx5_interface *intf;	/* the interface instantiated here */
	void *context;			/* cookie returned by intf->add() */
	unsigned long state;		/* MLX5_INTERFACE_* bit flags */
};
47
/* Bit numbers for mlx5_device_context.state */
enum {
	MLX5_INTERFACE_ADDED,		/* intf->add() was invoked for this device */
	MLX5_INTERFACE_ATTACHED,	/* attach-capable interface is currently attached */
};
52
53 void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
54 {
55 struct mlx5_device_context *dev_ctx;
56 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
57
58 if (!mlx5_lag_intf_add(intf, priv))
59 return;
60
61 dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
62 if (!dev_ctx)
63 return;
64
65 dev_ctx->intf = intf;
66 dev_ctx->context = intf->add(dev);
67 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
68 if (intf->attach)
69 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
70
71 if (dev_ctx->context) {
72 spin_lock_irq(&priv->ctx_lock);
73 list_add_tail(&dev_ctx->list, &priv->ctx_list);
74 spin_unlock_irq(&priv->ctx_lock);
75 } else {
76 kfree(dev_ctx);
77 }
78 }
79
80 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
81 struct mlx5_priv *priv)
82 {
83 struct mlx5_device_context *dev_ctx;
84
85 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
86 if (dev_ctx->intf == intf)
87 return dev_ctx;
88 return NULL;
89 }
90
91 void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
92 {
93 struct mlx5_device_context *dev_ctx;
94 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
95
96 dev_ctx = mlx5_get_device(intf, priv);
97 if (!dev_ctx)
98 return;
99
100 spin_lock_irq(&priv->ctx_lock);
101 list_del(&dev_ctx->list);
102 spin_unlock_irq(&priv->ctx_lock);
103
104 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
105 intf->remove(dev, dev_ctx->context);
106
107 kfree(dev_ctx);
108 }
109
110 static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
111 {
112 struct mlx5_device_context *dev_ctx;
113 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
114
115 dev_ctx = mlx5_get_device(intf, priv);
116 if (!dev_ctx)
117 return;
118
119 if (intf->attach) {
120 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
121 return;
122 intf->attach(dev, dev_ctx->context);
123 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
124 } else {
125 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
126 return;
127 dev_ctx->context = intf->add(dev);
128 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
129 }
130 }
131
132 void mlx5_attach_device(struct mlx5_core_dev *dev)
133 {
134 struct mlx5_priv *priv = &dev->priv;
135 struct mlx5_interface *intf;
136
137 mutex_lock(&mlx5_intf_mutex);
138 list_for_each_entry(intf, &intf_list, list)
139 mlx5_attach_interface(intf, priv);
140 mutex_unlock(&mlx5_intf_mutex);
141 }
142
143 static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
144 {
145 struct mlx5_device_context *dev_ctx;
146 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
147
148 dev_ctx = mlx5_get_device(intf, priv);
149 if (!dev_ctx)
150 return;
151
152 if (intf->detach) {
153 if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
154 return;
155 intf->detach(dev, dev_ctx->context);
156 clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
157 } else {
158 if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
159 return;
160 intf->remove(dev, dev_ctx->context);
161 clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
162 }
163 }
164
165 void mlx5_detach_device(struct mlx5_core_dev *dev)
166 {
167 struct mlx5_priv *priv = &dev->priv;
168 struct mlx5_interface *intf;
169
170 mutex_lock(&mlx5_intf_mutex);
171 list_for_each_entry(intf, &intf_list, list)
172 mlx5_detach_interface(intf, priv);
173 mutex_unlock(&mlx5_intf_mutex);
174 }
175
176 bool mlx5_device_registered(struct mlx5_core_dev *dev)
177 {
178 struct mlx5_priv *priv;
179 bool found = false;
180
181 mutex_lock(&mlx5_intf_mutex);
182 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
183 if (priv == &dev->priv)
184 found = true;
185 mutex_unlock(&mlx5_intf_mutex);
186
187 return found;
188 }
189
190 int mlx5_register_device(struct mlx5_core_dev *dev)
191 {
192 struct mlx5_priv *priv = &dev->priv;
193 struct mlx5_interface *intf;
194
195 mutex_lock(&mlx5_intf_mutex);
196 list_add_tail(&priv->dev_list, &mlx5_dev_list);
197 list_for_each_entry(intf, &intf_list, list)
198 mlx5_add_device(intf, priv);
199 mutex_unlock(&mlx5_intf_mutex);
200
201 return 0;
202 }
203
204 void mlx5_unregister_device(struct mlx5_core_dev *dev)
205 {
206 struct mlx5_priv *priv = &dev->priv;
207 struct mlx5_interface *intf;
208
209 mutex_lock(&mlx5_intf_mutex);
210 list_for_each_entry(intf, &intf_list, list)
211 mlx5_remove_device(intf, priv);
212 list_del(&priv->dev_list);
213 mutex_unlock(&mlx5_intf_mutex);
214 }
215
216 int mlx5_register_interface(struct mlx5_interface *intf)
217 {
218 struct mlx5_priv *priv;
219
220 if (!intf->add || !intf->remove)
221 return -EINVAL;
222
223 mutex_lock(&mlx5_intf_mutex);
224 list_add_tail(&intf->list, &intf_list);
225 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
226 mlx5_add_device(intf, priv);
227 mutex_unlock(&mlx5_intf_mutex);
228
229 return 0;
230 }
231 EXPORT_SYMBOL(mlx5_register_interface);
232
233 void mlx5_unregister_interface(struct mlx5_interface *intf)
234 {
235 struct mlx5_priv *priv;
236
237 mutex_lock(&mlx5_intf_mutex);
238 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
239 mlx5_remove_device(intf, priv);
240 list_del(&intf->list);
241 mutex_unlock(&mlx5_intf_mutex);
242 }
243 EXPORT_SYMBOL(mlx5_unregister_interface);
244
245 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
246 {
247 struct mlx5_priv *priv = &mdev->priv;
248 struct mlx5_device_context *dev_ctx;
249 unsigned long flags;
250 void *result = NULL;
251
252 spin_lock_irqsave(&priv->ctx_lock, flags);
253
254 list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
255 if ((dev_ctx->intf->protocol == protocol) &&
256 dev_ctx->intf->get_dev) {
257 result = dev_ctx->intf->get_dev(dev_ctx->context);
258 break;
259 }
260
261 spin_unlock_irqrestore(&priv->ctx_lock, flags);
262
263 return result;
264 }
265 EXPORT_SYMBOL(mlx5_get_protocol_dev);
266
267 /* Must be called with intf_mutex held */
268 void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
269 {
270 struct mlx5_interface *intf;
271
272 list_for_each_entry(intf, &intf_list, list)
273 if (intf->protocol == protocol) {
274 mlx5_add_device(intf, &dev->priv);
275 break;
276 }
277 }
278
279 /* Must be called with intf_mutex held */
280 void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
281 {
282 struct mlx5_interface *intf;
283
284 list_for_each_entry(intf, &intf_list, list)
285 if (intf->protocol == protocol) {
286 mlx5_remove_device(intf, &dev->priv);
287 break;
288 }
289 }
290
291 static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
292 {
293 return (u16)((dev->pdev->bus->number << 8) |
294 PCI_SLOT(dev->pdev->devfn));
295 }
296
297 /* Must be called with intf_mutex held */
298 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
299 {
300 u16 pci_id = mlx5_gen_pci_id(dev);
301 struct mlx5_core_dev *res = NULL;
302 struct mlx5_core_dev *tmp_dev;
303 struct mlx5_priv *priv;
304
305 list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
306 tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
307 if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
308 res = tmp_dev;
309 break;
310 }
311 }
312
313 return res;
314 }
315
316 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
317 unsigned long param)
318 {
319 struct mlx5_priv *priv = &dev->priv;
320 struct mlx5_device_context *dev_ctx;
321 unsigned long flags;
322
323 spin_lock_irqsave(&priv->ctx_lock, flags);
324
325 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
326 if (dev_ctx->intf->event)
327 dev_ctx->intf->event(dev, dev_ctx->context, event, param);
328
329 spin_unlock_irqrestore(&priv->ctx_lock, flags);
330 }
331
/* Take the global interface/device list lock on behalf of code outside
 * this file.
 */
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

/* Release the lock taken by mlx5_dev_list_lock(). */
void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

/* Nonblocking variant; returns nonzero when the lock was acquired. */
int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}
/* This page took 0.041014 seconds and 5 git commands to generate. */