Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
00bab910 | 2 | * zfcp device driver |
1da177e4 | 3 | * |
00bab910 | 4 | * Setup and helper functions to access QDIO. |
1da177e4 | 5 | * |
1674b405 | 6 | * Copyright IBM Corporation 2002, 2010 |
1da177e4 LT |
7 | */ |
8 | ||
ecf39d42 CS |
9 | #define KMSG_COMPONENT "zfcp" |
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
11 | ||
5a0e3ad6 | 12 | #include <linux/slab.h> |
1da177e4 | 13 | #include "zfcp_ext.h" |
34c2b712 | 14 | #include "zfcp_qdio.h" |
1da177e4 | 15 | |
5d4e2262 | 16 | #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) |
1da177e4 | 17 | |
00bab910 | 18 | static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) |
1da177e4 | 19 | { |
b4e44590 | 20 | int pos; |
1da177e4 | 21 | |
b4e44590 | 22 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { |
00bab910 SS |
23 | sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); |
24 | if (!sbal[pos]) | |
b4e44590 | 25 | return -ENOMEM; |
b4e44590 SS |
26 | } |
27 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) | |
28 | if (pos % QBUFF_PER_PAGE) | |
00bab910 | 29 | sbal[pos] = sbal[pos - 1] + 1; |
b4e44590 | 30 | return 0; |
1da177e4 LT |
31 | } |
32 | ||
/*
 * Report a QDIO problem on this adapter and trigger recovery.
 * @id: identifier string passed through to the error recovery handler.
 * Reopens the adapter with LINK_UNPLUGGED and ERP_FAILED status bits set.
 */
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
43 | ||
5d4e2262 CS |
44 | static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) |
45 | { | |
46 | int i, sbal_idx; | |
47 | ||
48 | for (i = first; i < first + cnt; i++) { | |
49 | sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q; | |
50 | memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer)); | |
51 | } | |
52 | } | |
53 | ||
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int free, used;

	spin_lock(&qdio->stat_lock);
	now = get_clock_monotonic();
	/* time since the previous sample; >> 12 scales the clock delta
	 * down (presumably TOD units to microseconds — TODO confirm) */
	span = (now - qdio->req_q_time) >> 12;
	free = atomic_read(&qdio->req_q.count);
	used = QDIO_MAX_BUFFERS_PER_Q - free;
	/* integrate "buffers in use" over time for utilization stats */
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
	spin_unlock(&qdio->stat_lock);
}
69 | ||
/*
 * Completion handler for the outbound (request) queue, invoked by the
 * qdio layer with the range [first, first + count) of finished SBALs.
 * On error the event is traced and adapter recovery is started.
 */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_qdio_queue *queue = &qdio->req_q;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
				  count);
		zfcp_qdio_handler_error(qdio, "qdireq1");
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	/* sample utilization before raising the fill level, then return
	 * the completed SBALs to the free pool and wake any waiters */
	zfcp_qdio_account(qdio);
	atomic_add(count, &queue->count);
	wake_up(&qdio->req_q_wq);
}
91 | ||
/*
 * Hand processed SBALs of the response queue back to the qdio layer so
 * the adapter can reuse them for inbound traffic.  Any buffers still
 * accounted in queue->count from an earlier failed attempt are included.
 */
static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
{
	struct zfcp_qdio_queue *queue = &qdio->resp_q;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		/* keep the buffers accounted as pending and recover */
		atomic_set(&queue->count, count);
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
	} else {
		/* advance the ring position past the returned buffers */
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
112 | ||
/*
 * Completion handler for the inbound (response) queue, invoked by the
 * qdio layer with the range [first, first + count) of filled SBALs.
 * Each SBAL is scanned for completed requests, then the whole range is
 * handed back to the adapter.
 */
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	int sbal_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
				  count);
		zfcp_qdio_handler_error(qdio, "qdires1");
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been free before)
	 */
	zfcp_qdio_resp_put_back(qdio, count);
}
143 | ||
564e1c86 | 144 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, |
34c2b712 | 145 | struct zfcp_qdio_req *q_req, int max_sbals) |
1da177e4 | 146 | { |
564e1c86 | 147 | int count = atomic_read(&qdio->req_q.count); |
1da177e4 | 148 | count = min(count, max_sbals); |
42428f74 | 149 | q_req->sbal_limit = (q_req->sbal_first + count - 1) |
00bab910 | 150 | % QDIO_MAX_BUFFERS_PER_Q; |
1da177e4 LT |
151 | } |
152 | ||
/*
 * Close the current SBAL and chain the request into the next SBAL of
 * the ring.  Returns the first usable SBALE of the new SBAL, or NULL
 * if that would exceed the request's SBAL limit (nothing is modified
 * beyond the LAST_ENTRY flag in that case).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this requests number of SBALs up-to-date */
	q_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= q_req->sbtype;

	return sbale;
}
186 | ||
44cc76f2 | 187 | static struct qdio_buffer_element * |
1674b405 | 188 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) |
1da177e4 | 189 | { |
1674b405 CS |
190 | if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL) |
191 | return zfcp_qdio_sbal_chain(qdio, q_req); | |
42428f74 | 192 | q_req->sbale_curr++; |
564e1c86 | 193 | return zfcp_qdio_sbale_curr(qdio, q_req); |
1da177e4 LT |
194 | } |
195 | ||
564e1c86 | 196 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, |
34c2b712 | 197 | struct zfcp_qdio_req *q_req) |
1da177e4 | 198 | { |
564e1c86 | 199 | struct qdio_buffer **sbal = qdio->req_q.sbal; |
42428f74 SS |
200 | int first = q_req->sbal_first; |
201 | int last = q_req->sbal_last; | |
00bab910 SS |
202 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % |
203 | QDIO_MAX_BUFFERS_PER_Q + 1; | |
204 | zfcp_qdio_zero_sbals(sbal, first, count); | |
1da177e4 LT |
205 | } |
206 | ||
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg, int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= q_req->sbtype;

	/* map each scatter-gather segment to one SBALE */
	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			/* out of SBALs: undo everything and report full */
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_undo_sbals(qdio, q_req);
			return -EINVAL;
		}

		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;

		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
248 | ||
/*
 * Check for a free SBAL on the request queue.
 * NOTE: intentionally asymmetric locking — returns 1 WITH req_q_lock
 * held when an SBAL is available, and 0 with the lock released
 * otherwise.  zfcp_qdio_sbal_get's wait loop relies on this.
 */
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;

	spin_lock_bh(&qdio->req_q_lock);
	if (atomic_read(&req_q->count))
		return 1;
	spin_unlock_bh(&qdio->req_q_lock);
	return 0;
}
259 | ||
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	/* drop the lock while sleeping; zfcp_qdio_sbal_check reacquires
	 * it and keeps it held whenever it returns true */
	spin_unlock_bh(&qdio->req_q_lock);
	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
	if (ret > 0)
		return 0;	/* condition met: lock is held again */
	if (!ret) {
		/* timed out (not interrupted) */
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
	}

	/* failure path: reacquire the lock for the caller */
	spin_lock_bh(&qdio->req_q_lock);
	return -EIO;
}
288 | ||
/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;
	int first = q_req->sbal_first;
	int count = q_req->sbal_number;
	int retval;
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;

	/* sample utilization before the fill level changes */
	zfcp_qdio_account(qdio);

	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
			 count);
	if (unlikely(retval)) {
		/* submission failed: wipe the SBALs so they can be reused */
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	return 0;
}
318 | ||
564e1c86 SS |
319 | |
320 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | |
321 | struct zfcp_qdio *qdio) | |
322 | { | |
323 | ||
324 | id->cdev = qdio->adapter->ccw_device; | |
325 | id->q_format = QDIO_ZFCP_QFMT; | |
326 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | |
327 | ASCEBC(id->adapter_name, 8); | |
328 | id->qib_param_field_format = 0; | |
329 | id->qib_param_field = NULL; | |
330 | id->input_slib_elements = NULL; | |
331 | id->output_slib_elements = NULL; | |
332 | id->no_input_qs = 1; | |
333 | id->no_output_qs = 1; | |
334 | id->input_handler = zfcp_qdio_int_resp; | |
335 | id->output_handler = zfcp_qdio_int_req; | |
336 | id->int_parm = (unsigned long) qdio; | |
564e1c86 SS |
337 | id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); |
338 | id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); | |
339 | ||
340 | } | |
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;

	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
		return -ENOMEM;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	return qdio_allocate(&init_data);
}
359 | ||
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &qdio->req_q;
	spin_lock_bh(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
	spin_unlock_bh(&qdio->req_q_lock);

	qdio_shutdown(qdio->adapter->ccw_device,
		      QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	/* reset both rings to an empty, position-zero state */
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	qdio->resp_q.first = 0;
	atomic_set(&qdio->resp_q.count, 0);
}
393 | ||
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	int cc;

	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_activate(cdev))
		goto failed_qdio;

	/* initialize every inbound SBAL to a single empty entry */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	/* hand the whole response ring to the adapter for inbound traffic */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q.first = 0;
	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
d5a282a1 SS |
441 | |
442 | void zfcp_qdio_destroy(struct zfcp_qdio *qdio) | |
443 | { | |
444 | struct qdio_buffer **sbal_req, **sbal_resp; | |
445 | int p; | |
446 | ||
447 | if (!qdio) | |
448 | return; | |
449 | ||
450 | if (qdio->adapter->ccw_device) | |
451 | qdio_free(qdio->adapter->ccw_device); | |
452 | ||
453 | sbal_req = qdio->req_q.sbal; | |
454 | sbal_resp = qdio->resp_q.sbal; | |
455 | ||
456 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | |
457 | free_page((unsigned long) sbal_req[p]); | |
458 | free_page((unsigned long) sbal_resp[p]); | |
459 | } | |
460 | ||
461 | kfree(qdio); | |
462 | } | |
463 | ||
464 | int zfcp_qdio_setup(struct zfcp_adapter *adapter) | |
465 | { | |
466 | struct zfcp_qdio *qdio; | |
467 | ||
468 | qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); | |
469 | if (!qdio) | |
470 | return -ENOMEM; | |
471 | ||
472 | qdio->adapter = adapter; | |
473 | ||
474 | if (zfcp_qdio_allocate(qdio)) { | |
475 | zfcp_qdio_destroy(qdio); | |
476 | return -ENOMEM; | |
477 | } | |
478 | ||
479 | spin_lock_init(&qdio->req_q_lock); | |
480 | spin_lock_init(&qdio->stat_lock); | |
481 | ||
482 | adapter->qdio = qdio; | |
483 | return 0; | |
484 | } | |
485 |