/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING	= (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30),
	IB_DEVICE_ON_DEMAND_PAGING	= (1<<31),
};
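
/*
 * Illustrative sketch (not part of this header's API): a consumer would
 * normally test one of the capability bits above against the
 * device_cap_flags returned by ib_query_device(), declared later in this
 * file, before relying on the feature.  "dev" is a hypothetical
 * struct ib_device pointer:
 *
 *	struct ib_device_attr attr;
 *	bool have_frwr = false;
 *
 *	if (!ib_query_device(dev, &attr))
 *		have_frwr = attr.device_cap_flags &
 *			    IB_DEVICE_MEM_MGT_EXTENSIONS;
 */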

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};
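
/*
 * Illustrative sketch: IB_ODP_SUPPORT in general_caps gates the whole
 * on-demand-paging feature; the per-transport masks then say which
 * operations may fault pages in on demand.  E.g., checking for
 * ODP-capable RC sends ("attr" is a hypothetical, already-queried
 * struct ib_device_attr):
 *
 *	bool rc_odp_send =
 *		(attr.odp_caps.general_caps & IB_ODP_SUPPORT) &&
 *		(attr.odp_caps.per_transport_caps.rc_odp_caps &
 *		 IB_ODP_SUPPORT_SEND);
 */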

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}
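
/*
 * Illustrative sketch: ib_mtu_enum_to_int() is typically applied to the
 * active_mtu reported by ib_query_port() (declared later in this file)
 * when sizing buffers.  "dev" and "port" are hypothetical:
 *
 *	struct ib_port_attr pattr;
 *	int mtu = -1;
 *
 *	if (!ib_query_port(dev, port, &pattr))
 *		mtu = ib_mtu_enum_to_int(pattr.active_mtu);
 */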

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};
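
/*
 * Illustrative note: active_width and active_speed from struct
 * ib_port_attr combine to give the link data rate.  Assuming the usual
 * InfiniBand per-lane signalling rates (SDR = 2.5 Gb/sec, DDR = 5,
 * QDR = 10, FDR = 14, EDR = 25), a 4X EDR port runs at
 * ib_width_enum_to_int(IB_WIDTH_4X) = 4 lanes * 25 Gb/sec = 100 Gb/sec.
 */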

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
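
/*
 * Illustrative sketch: a client typically embeds an ib_event_handler,
 * initializes it with INIT_IB_EVENT_HANDLER, and registers it with
 * ib_register_event_handler() (declared later in this file).
 * "my_handler" is a hypothetical callback:
 *
 *	static void my_handler(struct ib_event_handler *h,
 *			       struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			...
 *	}
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_handler);
 *	ib_register_event_handler(&eh);
 */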

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS	 = 11,
	IB_RATE_56_GBPS	 = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS	 = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 *     ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int		max_reg_descriptors;
	u32		flags;
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
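
/*
 * Illustrative sketch: for a T10-DIF signature handover the two domains
 * of an ib_sig_attrs are described independently; e.g. CRC-guarded
 * 512-byte protection intervals on the wire and no protection in memory
 * (field values here are illustrative, not a recommendation):
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.mem  = { .sig_type = IB_SIG_TYPE_NONE },
 *		.wire = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif  = {
 *				.bg_type     = IB_T10DIF_CRC,
 *				.pi_interval = 512,
 *			},
 *		},
 *	};
 */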

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32			fail_status;
	struct ib_sig_err	sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
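
/*
 * Illustrative sketch of the IB_WC_RECV bit trick described above
 * ("handle_receive" and "handle_send_completion" are hypothetical
 * consumer helpers):
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		handle_receive(&wc);
 *	else
 *		handle_send_completion(&wc);
 */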

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_SMAC			= (1<<21),
	IB_QP_ALT_SMAC			= (1<<22),
	IB_QP_VID			= (1<<23),
	IB_QP_ALT_VID			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw           *mw;
			/* The new rkey for the memory window. */
			u32                     rkey;
			struct ib_mw_bind_info  bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};
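
/*
 * Illustrative sketch: only the union member matching the opcode is
 * read, so an RDMA write work request is built by filling wr.rdma.
 * "sge", "raddr" and "rkey" are hypothetical, already-set-up values:
 *
 *	struct ib_send_wr wr = {
 *		.wr_id	    = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	wr.wr.rdma.remote_addr = raddr;
 *	wr.wr.rdma.rkey	       = rkey;
 */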

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND	= (1<<6),
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64			wr_id;
	int			send_flags;
	struct ib_mw_bind_info	bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;

	struct pid	       *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root		umem_tree;
	/*
	 * Protects .umem_tree, as well as odp_mrs_count and
	 * mmu notifiers registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head	no_private_counters;
	int			odp_mrs_count;
#endif
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
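
/*
 * Illustrative sketch: the variable-length specs are laid out in memory
 * immediately after the ib_flow_attr, one spec per num_of_specs; e.g. a
 * rule with a single (here unpopulated) Ethernet spec on port 1:
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth eth;
 *	} flow = {
 *		.attr = {
 *			.type	      = IB_FLOW_ATTR_NORMAL,
 *			.size	      = sizeof(flow),
 *			.num_of_specs = 1,
 *			.port	      = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ib_flow_spec_eth),
 *		},
 *	};
 */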

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								    int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject              *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	size_t copy_sz;

	copy_sz = min_t(size_t, len, udata->outlen);
	return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
}
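
/*
 * Illustrative sketch: a driver's verb implementation uses these helpers
 * to move command/response structures across the user/kernel boundary.
 * "my_create_cq_resp", "cqn" and "err_unwind" are hypothetical driver
 * names:
 *
 *	struct my_create_cq_resp resp = { .cqn = cqn };
 *
 *	if (udata && ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		goto err_unwind;
 */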

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

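/*
 * Illustrative sketch: a driver's modify_qp method would typically call
 * this before touching hardware.  "ibqp", "qp_attr", "qp_attr_mask",
 * "cur_state" and "new_state" are hypothetical driver locals:
 *
 *	enum rdma_link_layer ll =
 *		rdma_port_get_link_layer(ibqp->device, qp_attr->port_num);
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				qp_attr_mask, ll))
 *		return -EINVAL;
 */
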
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
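
/*
 * Illustrative sketch: the PD is normally the first verbs object a
 * kernel consumer creates; everything else hangs off it.  Failure is
 * reported via the usual ERR_PTR convention:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */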

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
1918
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

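/*
 * Example (illustrative sketch): moving a freshly created QP from RESET
 * to INIT. Port 1 and P_Key index 0 are assumptions for the example only.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
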
/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

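/*
 * Example (illustrative sketch): posting a single signaled RDMA write.
 * "sge", "remote_addr" and "rkey" are assumed to come from connection
 * setup; error handling is reduced to reporting the first unqueued WR.
 *
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey        = rkey;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("failed to post WR %llu\n", bad_wr->wr_id);
 */
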
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 * Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

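/*
 * Example (illustrative sketch): creating a CQ with at least 256 entries
 * on completion vector 0. "my_comp_handler" is an assumed callback; the
 * asynchronous event handler is left NULL for brevity.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */
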
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

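/*
 * Example (illustrative sketch): draining a CQ one entry at a time and
 * checking each completion's status.
 *
 *	struct ib_wc wc;
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 1, &wc)) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("WR %llu failed: %d\n", wc.wr_id, wc.status);
 *	}
 *	if (n < 0)
 *		pr_err("ib_poll_cq() failed: %d\n", n);
 */
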
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}

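/*
 * Example (illustrative sketch): the race-free re-arm pattern built on
 * the > 0 return value. "drain_cq" is an assumed helper that calls
 * ib_poll_cq() until the CQ is empty.
 *
 *	do {
 *		drain_cq(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
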
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 * usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

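/*
 * Example (illustrative sketch): mapping a kernel buffer for device
 * writes, checking the mapping, and unmapping it again. "buf" and "len"
 * are assumed to describe a kmalloc'ed region.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
 */
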
/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	dma_unmap_single_attrs(dev->dma_device, addr, size,
			       direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

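/*
 * Example (illustrative sketch): walking a mapped scatterlist. "sgl" is
 * assumed to have been mapped with ib_dma_map_sg(), whose return value
 * is "mapped".
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, mapped, i) {
 *		u64 addr = ib_sg_dma_address(dev, sg);
 *		unsigned int len = ib_sg_dma_len(dev, sg);
 *		...
 *	}
 */
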
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 * by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 * memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * Conceptually, this call performs a memory region deregistration
 * followed by a physical memory region registration. Where possible,
 * resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 * properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 * the new protection domain to associate with the memory region,
 * otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 * field specifies a list of physical buffers to use in the new
 * translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 * field specifies the size of the phys_buf_array, otherwise, this
 * parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 * field specifies the new memory access rights, otherwise, this
 * parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_create_mr - Allocates a memory region that may be used for
 * signature handover operations.
 * @pd: The protection domain associated with the region.
 * @mr_init_attr: memory region init attributes.
 */
struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr);

/**
 * ib_destroy_mr - Destroys a memory region that was created using
 * ib_create_mr and removes it from HW translation tables.
 * @mr: The memory region to destroy.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_destroy_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 * used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size. The actual
 * size is returned in max_page_list_len. The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WR_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
	struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 * page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

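/*
 * Example (illustrative sketch, details vary by consumer): allocating
 * fast registration resources and refreshing the key before each reuse.
 * "next_key" is an assumed counter; filling in iova_start, length,
 * page_shift, access_flags and rkey is elided.
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 16);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(device, 16);
 *	struct ib_send_wr wr = { .opcode = IB_WR_FAST_REG_MR };
 *
 *	ib_update_fast_reg_key(mr, next_key++);
 *	wr.wr.fast_reg.page_list = pl;
 *	...
 */
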
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 * QP, which binds the memory window to the given address range and
 * remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 * its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value. The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

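/*
 * Example (illustrative sketch): mapping pages into an FMR and later
 * unmapping it. "pages" is an assumed array of page-aligned DMA
 * addresses obtained via the ib_dma_*() helpers.
 *
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);
 */
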
/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

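/*
 * Example (illustrative sketch): validating user-supplied access flags
 * before registering memory on a user's behalf.
 *
 *	if (ib_check_mr_access(access_flags))
 *		return -EINVAL;
 */
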
/**
 * ib_check_mr_status - lightweight check of MR status.
 * This routine may provide status checks on a selected ib_mr; the
 * first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 * ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 * Failed checks will be indicated in the status bitmask
 * and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

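/*
 * Example (illustrative sketch): checking signature status after a
 * signature-enabled transfer completes. "handle_sig_error" is an
 * assumed helper.
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		handle_sig_error(&mr_status.sig_err);
 */
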
#endif /* IB_VERBS_H */