mlx4: Implement QP paravirtualization and maintain phys_pkey_cache for smp_snoop
[deliverable/linux.git] / include / linux / mlx4 / device.h
1 /*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef MLX4_DEVICE_H
34 #define MLX4_DEVICE_H
35
36 #include <linux/pci.h>
37 #include <linux/completion.h>
38 #include <linux/radix-tree.h>
39 #include <linux/cpu_rmap.h>
40
41 #include <linux/atomic.h>
42
/* MSI-X vector budgeting: per-port max/min, overall cap, and legacy pool size. */
#define MAX_MSIX_P_PORT		17
#define MAX_MSIX		64
#define MSIX_LEGACY_SZ		4
#define MIN_MSIX_P_PORT		5
47
/* Device-wide state bits kept in mlx4_dev.flags. */
enum {
	MLX4_FLAG_MSI_X		= 1 << 0,	/* MSI-X interrupts in use */
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,	/* FW expects the old port-command layout */
	MLX4_FLAG_MASTER	= 1 << 2,	/* this function acts as SR-IOV master (PF) */
	MLX4_FLAG_SLAVE		= 1 << 3,	/* this function acts as SR-IOV slave (VF) */
	MLX4_FLAG_SRIOV		= 1 << 4,	/* SR-IOV is enabled on this device */
};

enum {
	MLX4_MAX_PORTS		= 2,	/* physical ports per HCA */
	MLX4_MAX_PORT_PKEYS	= 128	/* P_Key table entries per port */
};

/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
 * These qkeys must not be allowed for general use. This is a 64k range,
 * and to test for violation, we use the mask (protect against future chg).
 */
#define MLX4_RESERVED_QKEY_BASE  (0xFFFF0000)
#define MLX4_RESERVED_QKEY_MASK  (0xFFFF0000)

enum {
	MLX4_BOARD_ID_LEN = 64	/* size of mlx4_dev.board_id, including NUL */
};

enum {
	MLX4_MAX_NUM_PF		= 16,	/* max physical functions */
	MLX4_MAX_NUM_VF		= 64,	/* max virtual functions */
	MLX4_MFUNC_MAX		= 80,	/* MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF */
	MLX4_MAX_EQ_NUM		= 1024,
	MLX4_MFUNC_EQ_NUM	= 4,
	MLX4_MFUNC_MAX_EQES	= 8,
	MLX4_MFUNC_EQE_MASK	= (MLX4_MFUNC_MAX_EQES - 1)
};
81
/* Driver supports 3 different device methods to manage traffic steering:
 * - device managed - High level API for ib and eth flow steering. FW is
 *   managing flow steering tables.
 * - B0 steering mode - Common low level API for ib and (if supported) eth.
 * - A0 steering mode - Limited low level API for eth. In case of IB,
 *   B0 mode is in use.
 */
enum {
	MLX4_STEERING_MODE_A0,
	MLX4_STEERING_MODE_B0,
	MLX4_STEERING_MODE_DEVICE_MANAGED
};
94
95 static inline const char *mlx4_steering_mode_str(int steering_mode)
96 {
97 switch (steering_mode) {
98 case MLX4_STEERING_MODE_A0:
99 return "A0 steering";
100
101 case MLX4_STEERING_MODE_B0:
102 return "B0 steering";
103
104 case MLX4_STEERING_MODE_DEVICE_MANAGED:
105 return "Device managed flow steering";
106
107 default:
108 return "Unrecognize steering mode";
109 }
110 }
111
/* FW capability bits reported by QUERY_DEV_CAP; stored in mlx4_caps.flags. */
enum {
	MLX4_DEV_CAP_FLAG_RC		= 1LL << 0,	/* RC transport supported */
	MLX4_DEV_CAP_FLAG_UC		= 1LL << 1,	/* UC transport supported */
	MLX4_DEV_CAP_FLAG_UD		= 1LL << 2,	/* UD transport supported */
	MLX4_DEV_CAP_FLAG_XRC		= 1LL << 3,	/* XRC transport supported */
	MLX4_DEV_CAP_FLAG_SRQ		= 1LL << 6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL << 7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL << 8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL << 9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,	/* RoCE (IB over Ethernet) */
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
};

/* Second capability word; stored in mlx4_caps.flags2. */
enum {
	MLX4_DEV_CAP_FLAG2_RSS		= 1LL << 0,
	MLX4_DEV_CAP_FLAG2_RSS_TOP	= 1LL << 1,
	MLX4_DEV_CAP_FLAG2_RSS_XOR	= 1LL << 2,
	MLX4_DEV_CAP_FLAG2_FS_EN	= 1LL << 3
};

/* SMP attribute id, already in big-endian wire order. */
#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

/* Memory-window / fast-register capability bits (mlx4_caps.bmme_flags). */
enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 << 6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 << 7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 << 9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};
158
/* Hardware event (EQE) type codes; values are fixed by the device interface. */
enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a,
	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,	/* slave->master comm channel */
	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,	/* function level reset */
	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
	MLX4_EVENT_TYPE_NONE		   = 0xff,	/* software sentinel, not a HW code */
};
185
/* Subtypes of MLX4_EVENT_TYPE_PORT_CHANGE. */
enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

/* Subtypes of MLX4_EVENT_TYPE_FATAL_WARNING. */
enum {
	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};

/* Memory-region access permission bits. */
enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

/* WQE/CQE opcodes; values are fixed by the device interface. */
enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};
229
enum {
	MLX4_STAT_RATE_OFFSET = 5
};

/* Protocol selector passed to the steering attach/detach helpers. */
enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};

enum {
	MLX4_MTT_FLAG_PRESENT = 1
};

/* Regions of the QP number space reserved at init time. */
enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,		/* firmware-owned QPs */
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION		/* must be last */
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE	= 0,
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3	/* sense link type automatically */
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX	= 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum mlx4_steer_type {
	MLX4_MC_STEER = 0,	/* multicast steering */
	MLX4_UC_STEER,		/* unicast steering */
	MLX4_NUM_STEERS
};

enum {
	MLX4_NUM_FEXCH		= 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

/* Subtypes of MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT. */
enum {
	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
};

/* Port mgmt change event handling */
enum {
	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
};

/* Any master-SM attribute (LID or SL) changed. */
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
297
298 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
299 {
300 return (major << 32) | (minor << 16) | subminor;
301 }
302
/*
 * Physical (unvirtualized) table sizes, as reported by firmware.  Arrays are
 * indexed by port number (1-based), hence the MLX4_MAX_PORTS + 1 sizing.
 */
struct mlx4_phys_caps {
	u32			gid_phys_table_len[MLX4_MAX_PORTS + 1];
	u32			pkey_phys_table_len[MLX4_MAX_PORTS + 1];
	u32			num_phys_eqs;
};
308
/*
 * Device capabilities and resource limits, filled in at init time from
 * firmware queries (possibly adjusted for a virtual function).  Per-port
 * arrays are indexed by port number (1-based): [MLX4_MAX_PORTS + 1].
 */
struct mlx4_caps {
	u64			fw_ver;		/* packed; see mlx4_fw_ver() */
	u32			function;	/* own function number; see mlx4_master_func_num() */
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	/* User access region (doorbell page) limits */
	int			num_uars;
	u32			uar_page_size;
	/* BlueFlame (low-latency send) parameters */
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	/* QP limits */
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;	/* first special (proxy) QP; see mlx4_is_guest_proxy() */
	u32			base_sqpn;	/* base of reserved QP range; see mlx4_is_qp_reserved() */
	u32			base_tunnel_sqpn;
	/* SRQ limits */
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	/* CQ/EQ limits */
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			comp_pool;
	/* Memory registration (MPT/MTT) limits */
	int			num_mpts;
	int			max_fmr_maps;
	int			num_mtts;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	/* Multicast group limits */
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			steering_mode;	/* one of MLX4_STEERING_MODE_* */
	int			fs_log_max_ucast_qp_range_size;
	int			num_pds;
	int			reserved_pds;
	int			max_xrcds;
	int			reserved_xrcds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u64			flags;		/* MLX4_DEV_CAP_FLAG_* */
	u64			flags2;		/* MLX4_DEV_CAP_FLAG2_* */
	u32			bmme_flags;	/* MLX4_BMME_FLAG_* */
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int			max_rss_tbl_sz;
	/* Per-region QP reservations; indexed by enum mlx4_qp_region */
	int			reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int			reserved_qps_base[MLX4_NUM_QP_REGION];
	int			log_num_macs;
	int			log_num_vlans;
	int			log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u8			suggested_type[MLX4_MAX_PORTS + 1];
	u8			default_sense[MLX4_MAX_PORTS + 1];
	u32			port_mask[MLX4_MAX_PORTS + 1];	/* used by mlx4_foreach_port() */
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
	u32			max_counters;
	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
	u16			sqp_demux;
};
395
/* One DMA-mapped chunk of a buffer. */
struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

/*
 * A HW-visible buffer: either a single direct chunk, or a list of
 * page-sized chunks (page_list) when nbufs > 1.  See mlx4_buf_offset().
 */
struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

/* Handle for a range of MTT (memory translation table) entries. */
struct mlx4_mtt {
	u32			offset;
	int			order;		/* log2 of number of entries */
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4	/* 4-byte doorbell records per page */
};

/* One page of doorbell records, carved out in order-0 or order-1 slots. */
struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

/* A single doorbell record, either kernel- (pgdir) or user-owned. */
struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

/* Bundle of db + mtt + buf allocated together by mlx4_alloc_hwq_res(). */
struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};
446
/* Memory region; created by mlx4_mr_alloc(), activated by mlx4_mr_enable(). */
struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;		/* lkey/rkey */
	u32			pd;
	u32			access;		/* MLX4_PERM_* bits */
	int			enabled;
};

/* Fast memory region for repeated map/unmap cycles (see mlx4_fmr_*). */
struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;		/* maps done since last unmap */
	u8			page_shift;
};

/* User access region: one doorbell page plus its BlueFlame mapping. */
struct mlx4_uar {
	unsigned long		pfn;
	int			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *map;
	void __iomem	       *bf_map;
};

/* A BlueFlame register slice within a UAR. */
struct mlx4_bf {
	unsigned long		offset;
	int			buf_size;
	struct mlx4_uar	       *uar;
	void __iomem	       *reg;
};
483
/*
 * Completion queue.  comp/event callbacks are invoked from EQ handling;
 * refcount + free synchronize destruction against in-flight events.
 */
struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;	/* consumer-index doorbell record */
	__be32		       *arm_db;		/* arm doorbell record */
	int			arm_sn;		/* arm sequence number */

	int			cqn;
	unsigned		vector;		/* completion EQ vector */

	atomic_t		refcount;
	struct completion	free;		/* completed when refcount drops for destroy */
};

/* Queue pair; event callback invoked on async QP events. */
struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

/* Shared receive queue. */
struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};
523
/*
 * InfiniBand address vector in device format (big-endian fields) —
 * field order/sizes must not change.
 */
struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;		/* GRH-present bit + source LID low bits */
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

/* Ethernet (RoCE) address vector in device format. */
struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

/* Traffic counter set in device format; read via the counter commands. */
struct mlx4_counter {
	u8	reserved1[3];
	u8	counter_mode;
	__be32	num_ifc;
	u32	reserved2[2];
	__be64	rx_frames;
	__be64	rx_bytes;
	__be64	tx_frames;
	__be64	tx_bytes;
};
568
/*
 * Top-level handle for one mlx4 device, shared by the core, Ethernet and
 * InfiniBand drivers.
 */
struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;		/* MLX4_FLAG_* */
	unsigned long		num_slaves;	/* SR-IOV slaves (master only) */
	struct mlx4_caps	caps;
	struct mlx4_phys_caps	phys_caps;
	struct radix_tree_root	qp_table_tree;	/* qpn -> struct mlx4_qp lookup */
	u8			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
	int			num_vfs;
	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
};
582
/*
 * Event queue entry as written by the hardware (__packed: layout is part
 * of the device interface — do not reorder or resize fields).  The union
 * member to read is selected by 'type' (enum mlx4_event) and 'subtype'.
 */
struct mlx4_eqe {
	u8			reserved1;
	u8			type;		/* enum mlx4_event */
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __packed comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __packed cmd;
		struct {
			__be32	qpn;
		} __packed qp;
		struct {
			__be32	srqn;
		} __packed srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __packed cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __packed port_change;
		struct {
			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
			u32 reserved;
			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
		} __packed comm_channel_arm;
		struct {
			u8	port;
			u8	reserved[3];
			__be64	mac;
		} __packed mac_update;
		struct {
			__be32	slave_id;
		} __packed flr_event;
		struct {
			__be16  current_temperature;
			__be16  warning_threshold;
		} __packed warming;
		struct {
			u8 reserved[3];
			u8 port;
			union {
				struct {
					__be16 mstr_sm_lid;
					__be16 port_lid;
					__be32 changed_attr;	/* MLX4_EQ_PORT_INFO_*_MASK bits */
					u8 reserved[3];
					u8 mstr_sm_sl;
					__be64 gid_prefix;
				} __packed port_info;
				struct {
					__be32 block_ptr;
					__be32 tbl_entries_mask;
				} __packed tbl_change_info;
			} params;
		} __packed port_mgmt_change;
	}			event;
	u8			slave_id;
	u8			reserved3[2];
	u8			owner;		/* ownership bit: HW vs SW */
} __packed;
657
/* Optional overrides passed to the INIT_PORT firmware command. */
struct mlx4_init_port_param {
	int			set_guid0;	/* nonzero: guid0 below is valid */
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};
671
/* Iterate over all ports of 'dev' whose configured type equals 'type'. */
#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])

/* Iterate over all ports usable for IB transport (native IB, or any port
 * when the device supports RoCE/IBoE).
 */
#define mlx4_foreach_ib_transport_port(port, dev)			  \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))

/* Sentinel slave id (fits the u8 slave_id fields above). */
#define MLX4_INVALID_SLAVE_ID	0xFF

void handle_port_mgmt_change_event(struct work_struct *work);
684
685 static inline int mlx4_master_func_num(struct mlx4_dev *dev)
686 {
687 return dev->caps.function;
688 }
689
690 static inline int mlx4_is_master(struct mlx4_dev *dev)
691 {
692 return dev->flags & MLX4_FLAG_MASTER;
693 }
694
695 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
696 {
697 return (qpn < dev->caps.base_sqpn + 8 +
698 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
699 }
700
701 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
702 {
703 int base = dev->caps.sqp_start + slave * 8;
704
705 if (qpn >= base && qpn < base + 8)
706 return 1;
707
708 return 0;
709 }
710
711 static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
712 {
713 return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
714 }
715
716 static inline int mlx4_is_slave(struct mlx4_dev *dev)
717 {
718 return dev->flags & MLX4_FLAG_SLAVE;
719 }
720
/* Allocate/free a HW-visible buffer; chunks larger than max_direct are
 * split into a page list.
 */
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
724 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
725 {
726 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
727 return buf->direct.buf + offset;
728 else
729 return buf->page_list[offset >> PAGE_SHIFT].buf +
730 (offset & (PAGE_SIZE - 1));
731 }
732
/* Protection domains and XRC domains */
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

/* User access regions and BlueFlame registers */
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);

/* Memory translation tables */
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

/* Memory regions */
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

/* Doorbell records */
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

/* Combined db + mtt + buf allocation (work-queue backing store) */
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

/* Completion queues */
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

/* Queue pairs */
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

/* Shared receive queues */
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

/* Port bring-up/tear-down firmware commands */
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

/* Unicast/multicast steering attach/detach */
int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol protocol, u64 *reg_id);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol, u64 reg_id);
794
/* Priority domains for flow-steering rules (lower value = higher priority). */
enum {
	MLX4_DOMAIN_UVERBS	= 0x1000,
	MLX4_DOMAIN_ETHTOOL	= 0x2000,
	MLX4_DOMAIN_RFS		= 0x3000,
	MLX4_DOMAIN_NIC		= 0x5000,
};

/* Software ids for flow-rule header types; mapped to HW ids via __sw_id_hw. */
enum mlx4_net_trans_rule_id {
	MLX4_NET_TRANS_RULE_ID_ETH = 0,
	MLX4_NET_TRANS_RULE_ID_IB,
	MLX4_NET_TRANS_RULE_ID_IPV6,
	MLX4_NET_TRANS_RULE_ID_IPV4,
	MLX4_NET_TRANS_RULE_ID_TCP,
	MLX4_NET_TRANS_RULE_ID_UDP,
	MLX4_NET_TRANS_RULE_NUM, /* should be last */
};
811
812 extern const u16 __sw_id_hw[];
813
814 static inline int map_hw_to_sw_id(u16 header_id)
815 {
816
817 int i;
818 for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
819 if (header_id == __sw_id_hw[i])
820 return i;
821 }
822 return -EINVAL;
823 }
824
/* Promiscuous modes for device-managed flow steering. */
enum mlx4_net_trans_promisc_mode {
	MLX4_FS_PROMISC_NONE = 0,
	MLX4_FS_PROMISC_UPLINK,
	/* For future use. Not implemented yet */
	MLX4_FS_PROMISC_FUNCTION_PORT,
	MLX4_FS_PROMISC_ALL_MULTI,
};

/* L2 match criteria; *_msk fields select which bits must match. */
struct mlx4_spec_eth {
	u8	dst_mac[6];
	u8	dst_mac_msk[6];
	u8	src_mac[6];
	u8	src_mac_msk[6];
	u8	ether_type_enable;
	__be16	ether_type;
	__be16	vlan_id_msk;
	__be16	vlan_id;
};

/* L4 (TCP/UDP) port match criteria. */
struct mlx4_spec_tcp_udp {
	__be16 dst_port;
	__be16 dst_port_msk;
	__be16 src_port;
	__be16 src_port_msk;
};

/* L3 (IPv4) address match criteria. */
struct mlx4_spec_ipv4 {
	__be32 dst_ip;
	__be32 dst_ip_msk;
	__be32 src_ip;
	__be32 src_ip_msk;
};

/* InfiniBand match criteria (remote QPN and destination GID). */
struct mlx4_spec_ib {
	__be32	r_qpn;
	__be32	qpn_msk;
	u8	dst_gid[16];
	u8	dst_gid_msk[16];
};

/* One match criterion in a rule's criteria list; 'id' selects the union arm. */
struct mlx4_spec_list {
	struct	list_head list;
	enum	mlx4_net_trans_rule_id id;
	union {
		struct mlx4_spec_eth eth;
		struct mlx4_spec_ib ib;
		struct mlx4_spec_ipv4 ipv4;
		struct mlx4_spec_tcp_udp tcp_udp;
	};
};

enum mlx4_net_trans_hw_rule_queue {
	MLX4_NET_TRANS_Q_FIFO,
	MLX4_NET_TRANS_Q_LIFO,
};

/* A complete steering rule: header + list of mlx4_spec_list criteria. */
struct mlx4_net_trans_rule {
	struct	list_head list;			/* criteria (mlx4_spec_list entries) */
	enum	mlx4_net_trans_hw_rule_queue queue_mode;
	bool	exclusive;
	bool	allow_loopback;
	enum	mlx4_net_trans_promisc_mode promisc_mode;
	u8	port;
	u16	priority;			/* see MLX4_DOMAIN_* */
	u32	qpn;				/* destination QP */
};
891
/* Promiscuous-mode management */
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
				enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);

/* MAC/VLAN table management and port configuration */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc);
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
			    u8 *pg, u16 *ratelimit);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

/* Fast memory regions */
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);

/* Interrupt/EQ helpers */
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);

/* Wake-on-LAN configuration */
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);

/* Traffic counters */
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);

/* Flow-steering rule attach/detach (device-managed mode) */
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);

/* Update the paravirtualized P_Key cache entry for a slave/port (SR-IOV). */
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
			  int i, int val);

/* Look up the reserved qkey for a proxy/tunnel QP; see MLX4_RESERVED_QKEY_BASE. */
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
948 #endif /* MLX4_DEVICE_H */
This page took 0.056618 seconds and 5 git commands to generate.