RDMA/i40iw: Add queue drain functions
[deliverable/linux.git] / drivers / infiniband / hw / i40iw / i40iw_verbs.h
CommitLineData
d3749841
FL
1/*******************************************************************************
2*
3* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4*
5* This software is available to you under a choice of one of two
6* licenses. You may choose to be licensed under the terms of the GNU
7* General Public License (GPL) Version 2, available from the file
8* COPYING in the main directory of this source tree, or the
9* OpenFabrics.org BSD license below:
10*
11* Redistribution and use in source and binary forms, with or
12* without modification, are permitted provided that the following
13* conditions are met:
14*
15* - Redistributions of source code must retain the above
16* copyright notice, this list of conditions and the following
17* disclaimer.
18*
19* - Redistributions in binary form must reproduce the above
20* copyright notice, this list of conditions and the following
21* disclaimer in the documentation and/or other materials
22* provided with the distribution.
23*
24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31* SOFTWARE.
32*
33*******************************************************************************/
34
35#ifndef I40IW_VERBS_H
36#define I40IW_VERBS_H
37
38struct i40iw_ucontext {
39 struct ib_ucontext ibucontext;
40 struct i40iw_device *iwdev;
41 struct list_head cq_reg_mem_list;
42 spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
43 struct list_head qp_reg_mem_list;
44 spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
45};
46
47struct i40iw_pd {
48 struct ib_pd ibpd;
49 struct i40iw_sc_pd sc_pd;
50 atomic_t usecount;
51};
52
53struct i40iw_hmc_pble {
54 union {
55 u32 idx;
56 dma_addr_t addr;
57 };
58};
59
60struct i40iw_cq_mr {
61 struct i40iw_hmc_pble cq_pbl;
62 dma_addr_t shadow;
63};
64
65struct i40iw_qp_mr {
66 struct i40iw_hmc_pble sq_pbl;
67 struct i40iw_hmc_pble rq_pbl;
68 dma_addr_t shadow;
69 struct page *sq_page;
70};
71
72struct i40iw_pbl {
73 struct list_head list;
74 union {
75 struct i40iw_qp_mr qp_mr;
76 struct i40iw_cq_mr cq_mr;
77 };
78
79 bool pbl_allocated;
80 u64 user_base;
81 struct i40iw_pble_alloc pble_alloc;
82 struct i40iw_mr *iwmr;
83};
84
85#define MAX_SAVE_PAGE_ADDRS 4
86struct i40iw_mr {
87 union {
88 struct ib_mr ibmr;
89 struct ib_mw ibmw;
90 struct ib_fmr ibfmr;
91 };
92 struct ib_umem *region;
93 u16 type;
94 u32 page_cnt;
b7aee855 95 u32 npages;
d3749841
FL
96 u32 stag;
97 u64 length;
98 u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
99 struct i40iw_pbl iwpbl;
100};
101
102struct i40iw_cq {
103 struct ib_cq ibcq;
104 struct i40iw_sc_cq sc_cq;
105 u16 cq_head;
106 u16 cq_size;
107 u16 cq_number;
108 bool user_mode;
109 u32 polled_completions;
110 u32 cq_mem_size;
111 struct i40iw_dma_mem kmem;
112 spinlock_t lock; /* for poll cq */
113 struct i40iw_pbl *iwpbl;
114};
115
116struct disconn_work {
117 struct work_struct work;
118 struct i40iw_qp *iwqp;
119};
120
121struct iw_cm_id;
122struct ietf_mpa_frame;
123struct i40iw_ud_file;
124
125struct i40iw_qp_kmode {
126 struct i40iw_dma_mem dma_mem;
127 u64 *wrid_mem;
128};
129
130struct i40iw_qp {
131 struct ib_qp ibqp;
132 struct i40iw_sc_qp sc_qp;
133 struct i40iw_device *iwdev;
134 struct i40iw_cq *iwscq;
135 struct i40iw_cq *iwrcq;
136 struct i40iw_pd *iwpd;
137 struct i40iw_qp_host_ctx_info ctx_info;
138 struct i40iwarp_offload_info iwarp_info;
139 void *allocated_buffer;
140 atomic_t refcount;
141 struct iw_cm_id *cm_id;
142 void *cm_node;
143 struct ib_mr *lsmm_mr;
144 struct work_struct work;
145 enum ib_qp_state ibqp_state;
146 u32 iwarp_state;
147 u32 qp_mem_size;
148 u32 last_aeq;
149 atomic_t close_timer_started;
150 spinlock_t lock; /* for post work requests */
151 struct i40iw_qp_context *iwqp_context;
152 void *pbl_vbase;
153 dma_addr_t pbl_pbase;
154 struct page *page;
155 u8 active_conn:1;
156 u8 user_mode:1;
157 u8 hte_added:1;
158 u8 flush_issued:1;
159 u8 destroyed:1;
160 u8 sig_all:1;
161 u8 pau_mode:1;
162 u8 rsvd:1;
163 u16 term_sq_flush_code;
164 u16 term_rq_flush_code;
165 u8 hw_iwarp_state;
166 u8 hw_tcp_state;
167 struct i40iw_qp_kmode kqp;
168 struct i40iw_dma_mem host_ctx;
169 struct timer_list terminate_timer;
170 struct i40iw_pbl *iwpbl;
171 struct i40iw_dma_mem q2_ctx_mem;
172 struct i40iw_dma_mem ietf_mem;
c2b75ef7
IM
173 struct completion sq_drained;
174 struct completion rq_drained;
d3749841
FL
175};
176#endif
This page took 0.063562 seconds and 5 git commands to generate.