1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2010
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20
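/* called when the timer of an FSF request expires: trigger logging in the
 * FCP channel (SIOSL) and reopen the adapter through error recovery */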
21 static void zfcp_fsf_request_timeout_handler(unsigned long data)
22 {
23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
24 zfcp_qdio_siosl(adapter);
25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
26 "fsrth_1", NULL);
27 }
28
29 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
30 unsigned long timeout)
31 {
32 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
33 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
34 fsf_req->timer.expires = jiffies + timeout;
35 add_timer(&fsf_req->timer);
36 }
37
38 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
39 {
40 BUG_ON(!fsf_req->erp_action);
41 fsf_req->timer.function = zfcp_erp_timeout_handler;
42 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
43 fsf_req->timer.expires = jiffies + 30 * HZ;
44 add_timer(&fsf_req->timer);
45 }
46
47 /* association between FSF command and FSF QTCB type */
48 static u32 fsf_qtcb_type[] = {
49 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
50 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
52 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
59 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
60 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
62 };
63
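/* the FC service class used by zfcp is not supported by the peer:
 * shut the adapter down and mark the request as failed */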
64 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
65 {
66 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
67 "operational because of an unsupported FC class\n");
68 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
69 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
70 }
71
72 /**
73 * zfcp_fsf_req_free - free memory used by fsf request
74 * @req: pointer to struct zfcp_fsf_req
75 */
76 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
77 {
78 if (likely(req->pool)) {
79 if (likely(req->qtcb))
80 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
81 mempool_free(req, req->pool);
82 return;
83 }
84
85 if (likely(req->qtcb))
86 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
87 kfree(req);
88 }
89
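/* unsolicited status reported a closed remote port: look up the port by
 * its D_ID and reopen it through error recovery */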
90 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
91 {
92 unsigned long flags;
93 struct fsf_status_read_buffer *sr_buf = req->data;
94 struct zfcp_adapter *adapter = req->adapter;
95 struct zfcp_port *port;
96 int d_id = ntoh24(sr_buf->d_id);
97
98 read_lock_irqsave(&adapter->port_list_lock, flags);
99 list_for_each_entry(port, &adapter->port_list, list)
100 if (port->d_id == d_id) {
101 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
102 break;
103 }
104 read_unlock_irqrestore(&adapter->port_list_lock, flags);
105 }
106
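/* evaluate link-down information: mark the adapter link as unplugged,
 * block the remote ports, report the cause and mark the adapter as
 * failed for error recovery */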
107 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
108 struct fsf_link_down_info *link_down)
109 {
110 struct zfcp_adapter *adapter = req->adapter;
111
112 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
113 return;
114
115 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
116
117 zfcp_scsi_schedule_rports_block(adapter);
118
119 if (!link_down)
120 goto out;
121
122 switch (link_down->error_code) {
123 case FSF_PSQ_LINK_NO_LIGHT:
124 dev_warn(&req->adapter->ccw_device->dev,
125 "There is no light signal from the local "
126 "fibre channel cable\n");
127 break;
128 case FSF_PSQ_LINK_WRAP_PLUG:
129 dev_warn(&req->adapter->ccw_device->dev,
130 "There is a wrap plug instead of a fibre "
131 "channel cable\n");
132 break;
133 case FSF_PSQ_LINK_NO_FCP:
134 dev_warn(&req->adapter->ccw_device->dev,
135 "The adjacent fibre channel node does not "
136 "support FCP\n");
137 break;
138 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
139 dev_warn(&req->adapter->ccw_device->dev,
140 "The FCP device is suspended because of a "
141 "firmware update\n");
142 break;
143 case FSF_PSQ_LINK_INVALID_WWPN:
144 dev_warn(&req->adapter->ccw_device->dev,
145 "The FCP device detected a WWPN that is "
146 "duplicate or not valid\n");
147 break;
148 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
149 dev_warn(&req->adapter->ccw_device->dev,
150 "The fibre channel fabric does not support NPIV\n");
151 break;
152 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
153 dev_warn(&req->adapter->ccw_device->dev,
154 "The FCP adapter cannot support more NPIV ports\n");
155 break;
156 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
157 dev_warn(&req->adapter->ccw_device->dev,
158 "The adjacent switch cannot support "
159 "more NPIV ports\n");
160 break;
161 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
162 dev_warn(&req->adapter->ccw_device->dev,
163 "The FCP adapter could not log in to the "
164 "fibre channel fabric\n");
165 break;
166 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
167 dev_warn(&req->adapter->ccw_device->dev,
168 "The WWPN assignment file on the FCP adapter "
169 "has been damaged\n");
170 break;
171 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
172 dev_warn(&req->adapter->ccw_device->dev,
173 "The mode table on the FCP adapter "
174 "has been damaged\n");
175 break;
176 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
177 dev_warn(&req->adapter->ccw_device->dev,
178 "All NPIV ports on the FCP adapter have "
179 "been assigned\n");
180 break;
181 default:
182 dev_warn(&req->adapter->ccw_device->dev,
183 "The link between the FCP adapter and "
184 "the FC fabric is down\n");
185 }
186 out:
187 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
188 }
189
190 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
191 {
192 struct fsf_status_read_buffer *sr_buf = req->data;
193 struct fsf_link_down_info *ldi =
194 (struct fsf_link_down_info *) &sr_buf->payload;
195
196 switch (sr_buf->status_subtype) {
197 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
198 zfcp_fsf_link_down_info_eval(req, ldi);
199 break;
200 case FSF_STATUS_READ_SUB_FDISC_FAILED:
201 zfcp_fsf_link_down_info_eval(req, ldi);
202 break;
203 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
204 zfcp_fsf_link_down_info_eval(req, NULL);
205 }
206 }
207
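/* process an unsolicited status buffer and trigger replenishment of the
 * status read requests via the adapter's stat_work */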
208 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
209 {
210 struct zfcp_adapter *adapter = req->adapter;
211 struct fsf_status_read_buffer *sr_buf = req->data;
212
213 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
214 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
215 mempool_free(sr_buf, adapter->pool.status_read_data);
216 zfcp_fsf_req_free(req);
217 return;
218 }
219
220 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
221
222 switch (sr_buf->status_type) {
223 case FSF_STATUS_READ_PORT_CLOSED:
224 zfcp_fsf_status_read_port_closed(req);
225 break;
226 case FSF_STATUS_READ_INCOMING_ELS:
227 zfcp_fc_incoming_els(req);
228 break;
229 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
230 break;
231 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
232 dev_warn(&adapter->ccw_device->dev,
233 "The error threshold for checksum statistics "
234 "has been exceeded\n");
235 zfcp_dbf_hba_berr(adapter->dbf, req);
236 break;
237 case FSF_STATUS_READ_LINK_DOWN:
238 zfcp_fsf_status_read_link_down(req);
239 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
240 break;
241 case FSF_STATUS_READ_LINK_UP:
242 dev_info(&adapter->ccw_device->dev,
243 "The local link has been restored\n");
244 /* All ports should be marked as ready to run again */
245 zfcp_erp_set_adapter_status(adapter,
246 ZFCP_STATUS_COMMON_RUNNING);
247 zfcp_erp_adapter_reopen(adapter,
248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
249 ZFCP_STATUS_COMMON_ERP_FAILED,
250 "fssrh_2", req);
251 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
252
253 break;
254 case FSF_STATUS_READ_NOTIFICATION_LOST:
255 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
256 zfcp_cfdc_adapter_access_changed(adapter);
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
258 queue_work(adapter->work_queue, &adapter->scan_work);
259 break;
260 case FSF_STATUS_READ_CFDC_UPDATED:
261 zfcp_cfdc_adapter_access_changed(adapter);
262 break;
263 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
264 adapter->adapter_features = sr_buf->payload.word[0];
265 break;
266 }
267
268 mempool_free(sr_buf, adapter->pool.status_read_data);
269 zfcp_fsf_req_free(req);
270
271 atomic_inc(&adapter->stat_miss);
272 queue_work(adapter->work_queue, &adapter->stat_work);
273 }
274
275 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
276 {
277 switch (req->qtcb->header.fsf_status_qual.word[0]) {
278 case FSF_SQ_FCP_RSP_AVAILABLE:
279 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
280 case FSF_SQ_NO_RETRY_POSSIBLE:
281 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
282 return;
283 case FSF_SQ_COMMAND_ABORTED:
284 break;
285 case FSF_SQ_NO_RECOM:
286 dev_err(&req->adapter->ccw_device->dev,
287 "The FCP adapter reported a problem "
288 "that cannot be recovered\n");
289 zfcp_qdio_siosl(req->adapter);
290 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
291 break;
292 }
293 /* all non-return cases set FSFREQ_ERROR */
294 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
295 }
296
297 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
298 {
299 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
300 return;
301
302 switch (req->qtcb->header.fsf_status) {
303 case FSF_UNKNOWN_COMMAND:
304 dev_err(&req->adapter->ccw_device->dev,
305 "The FCP adapter does not recognize the command 0x%x\n",
306 req->qtcb->header.fsf_command);
307 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
308 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
309 break;
310 case FSF_ADAPTER_STATUS_AVAILABLE:
311 zfcp_fsf_fsfstatus_qual_eval(req);
312 break;
313 }
314 }
315
316 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
317 {
318 struct zfcp_adapter *adapter = req->adapter;
319 struct fsf_qtcb *qtcb = req->qtcb;
320 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
321
322 zfcp_dbf_hba_fsf_response(req);
323
324 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
325 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
326 return;
327 }
328
329 switch (qtcb->prefix.prot_status) {
330 case FSF_PROT_GOOD:
331 case FSF_PROT_FSF_STATUS_PRESENTED:
332 return;
333 case FSF_PROT_QTCB_VERSION_ERROR:
334 dev_err(&adapter->ccw_device->dev,
335 "QTCB version 0x%x not supported by FCP adapter "
336 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
337 psq->word[0], psq->word[1]);
338 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
339 break;
340 case FSF_PROT_ERROR_STATE:
341 case FSF_PROT_SEQ_NUMB_ERROR:
342 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
343 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
344 break;
345 case FSF_PROT_UNSUPP_QTCB_TYPE:
346 dev_err(&adapter->ccw_device->dev,
347 "The QTCB type is not supported by the FCP adapter\n");
348 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
349 break;
350 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
351 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
352 &adapter->status);
353 break;
354 case FSF_PROT_DUPLICATE_REQUEST_ID:
355 dev_err(&adapter->ccw_device->dev,
356 "0x%Lx is an ambiguous request identifier\n",
357 (unsigned long long)qtcb->bottom.support.req_handle);
358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
359 break;
360 case FSF_PROT_LINK_DOWN:
361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
362 /* go through reopen to flush pending requests */
363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
364 break;
365 case FSF_PROT_REEST_QUEUE:
366 /* All ports should be marked as ready to run again */
367 zfcp_erp_set_adapter_status(adapter,
368 ZFCP_STATUS_COMMON_RUNNING);
369 zfcp_erp_adapter_reopen(adapter,
370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
371 ZFCP_STATUS_COMMON_ERP_FAILED,
372 "fspse_8", req);
373 break;
374 default:
375 dev_err(&adapter->ccw_device->dev,
376 "0x%x is not a valid transfer protocol status\n",
377 qtcb->prefix.prot_status);
378 zfcp_qdio_siosl(adapter);
379 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
380 }
381 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
382 }
383
384 /**
385 * zfcp_fsf_req_complete - process completion of a FSF request
386 * @req: The FSF request that has been completed.
387 *
388 * When a request has been completed either from the FCP adapter,
389 * or it has been dismissed due to a queue shutdown, this function
390 * is called to process the completion status and trigger further
391 * events related to the FSF request.
392 */
393 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
394 {
395 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
396 zfcp_fsf_status_read_handler(req);
397 return;
398 }
399
400 del_timer(&req->timer);
401 zfcp_fsf_protstatus_eval(req);
402 zfcp_fsf_fsfstatus_eval(req);
403 req->handler(req);
404
405 if (req->erp_action)
406 zfcp_erp_notify(req->erp_action, 0);
407
408 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
409 zfcp_fsf_req_free(req);
410 else
411 complete(&req->completion);
412 }
413
414 /**
415 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
416 * @adapter: pointer to struct zfcp_adapter
417 *
418 * Never ever call this without shutting down the adapter first.
419 * Otherwise the adapter would continue using and corrupting s390 storage.
420 * Included BUG_ON() call to ensure this is done.
421 * ERP is supposed to be the only user of this function.
422 */
423 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
424 {
425 struct zfcp_fsf_req *req, *tmp;
426 LIST_HEAD(remove_queue);
427
428 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
429 zfcp_reqlist_move(adapter->req_list, &remove_queue);
430
431 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
432 list_del(&req->list);
433 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
434 zfcp_fsf_req_complete(req);
435 }
436 }
437
438 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
439 {
440 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
441 struct zfcp_adapter *adapter = req->adapter;
442 struct Scsi_Host *shost = adapter->scsi_host;
443 struct fc_els_flogi *nsp, *plogi;
444
445 /* adjust pointers for missing command code */
446 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
447 - sizeof(u32));
448 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
449 - sizeof(u32));
450
451 if (req->data)
452 memcpy(req->data, bottom, sizeof(*bottom));
453
454 fc_host_port_name(shost) = nsp->fl_wwpn;
455 fc_host_node_name(shost) = nsp->fl_wwnn;
456 fc_host_port_id(shost) = ntoh24(bottom->s_id);
457 fc_host_speed(shost) = bottom->fc_link_speed;
458 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
459
460 adapter->hydra_version = bottom->adapter_type;
461 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
462 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
463 (u16)FSF_STATUS_READS_RECOM);
464
465 if (fc_host_permanent_port_name(shost) == -1)
466 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
467
468 switch (bottom->fc_topology) {
469 case FSF_TOPO_P2P:
470 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
471 adapter->peer_wwpn = plogi->fl_wwpn;
472 adapter->peer_wwnn = plogi->fl_wwnn;
473 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
474 break;
475 case FSF_TOPO_FABRIC:
476 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
477 break;
478 case FSF_TOPO_AL:
479 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
480 /* fall through */
481 default:
482 dev_err(&adapter->ccw_device->dev,
483 "Unknown or unsupported arbitrated loop "
484 "fibre channel topology detected\n");
485 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
486 return -EIO;
487 }
488
489 zfcp_scsi_set_prot(adapter);
490
491 return 0;
492 }
493
494 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
495 {
496 struct zfcp_adapter *adapter = req->adapter;
497 struct fsf_qtcb *qtcb = req->qtcb;
498 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
499 struct Scsi_Host *shost = adapter->scsi_host;
500
501 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
502 return;
503
504 adapter->fsf_lic_version = bottom->lic_version;
505 adapter->adapter_features = bottom->adapter_features;
506 adapter->connection_features = bottom->connection_features;
507 adapter->peer_wwpn = 0;
508 adapter->peer_wwnn = 0;
509 adapter->peer_d_id = 0;
510
511 switch (qtcb->header.fsf_status) {
512 case FSF_GOOD:
513 if (zfcp_fsf_exchange_config_evaluate(req))
514 return;
515
516 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
517 dev_err(&adapter->ccw_device->dev,
518 "FCP adapter maximum QTCB size (%d bytes) "
519 "is too small\n",
520 bottom->max_qtcb_size);
521 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
522 return;
523 }
524 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
525 &adapter->status);
526 break;
527 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
528 fc_host_node_name(shost) = 0;
529 fc_host_port_name(shost) = 0;
530 fc_host_port_id(shost) = 0;
531 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
532 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
533 adapter->hydra_version = 0;
534
535 zfcp_fsf_link_down_info_eval(req,
536 &qtcb->header.fsf_status_qual.link_down_info);
537 break;
538 default:
539 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
540 return;
541 }
542
543 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
544 adapter->hardware_version = bottom->hardware_version;
545 memcpy(fc_host_serial_number(shost), bottom->serial_number,
546 min(FC_SERIAL_NUMBER_SIZE, 17));
547 EBCASC(fc_host_serial_number(shost),
548 min(FC_SERIAL_NUMBER_SIZE, 17));
549 }
550
551 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
552 dev_err(&adapter->ccw_device->dev,
553 "The FCP adapter only supports newer "
554 "control block versions\n");
555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
556 return;
557 }
558 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
559 dev_err(&adapter->ccw_device->dev,
560 "The FCP adapter only supports older "
561 "control block versions\n");
562 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
563 }
564 }
565
566 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
567 {
568 struct zfcp_adapter *adapter = req->adapter;
569 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
570 struct Scsi_Host *shost = adapter->scsi_host;
571
572 if (req->data)
573 memcpy(req->data, bottom, sizeof(*bottom));
574
575 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
576 fc_host_permanent_port_name(shost) = bottom->wwpn;
577 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
578 } else
579 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
580 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
581 fc_host_supported_speeds(shost) = bottom->supported_speed;
582 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
583 FC_FC4_LIST_SIZE);
584 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
585 FC_FC4_LIST_SIZE);
586 }
587
588 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
589 {
590 struct fsf_qtcb *qtcb = req->qtcb;
591
592 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
593 return;
594
595 switch (qtcb->header.fsf_status) {
596 case FSF_GOOD:
597 zfcp_fsf_exchange_port_evaluate(req);
598 break;
599 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
600 zfcp_fsf_exchange_port_evaluate(req);
601 zfcp_fsf_link_down_info_eval(req,
602 &qtcb->header.fsf_status_qual.link_down_info);
603 break;
604 }
605 }
606
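/* allocate a zeroed FSF request, from the given mempool if available */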
607 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
608 {
609 struct zfcp_fsf_req *req;
610
611 if (likely(pool))
612 req = mempool_alloc(pool, GFP_ATOMIC);
613 else
614 req = kmalloc(sizeof(*req), GFP_ATOMIC);
615
616 if (unlikely(!req))
617 return NULL;
618
619 memset(req, 0, sizeof(*req));
620 req->pool = pool;
621 return req;
622 }
623
624 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
625 {
626 struct fsf_qtcb *qtcb;
627
628 if (likely(pool))
629 qtcb = mempool_alloc(pool, GFP_ATOMIC);
630 else
631 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
632
633 if (unlikely(!qtcb))
634 return NULL;
635
636 memset(qtcb, 0, sizeof(*qtcb));
637 return qtcb;
638 }
639
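/* allocate and initialize an FSF request, including its QTCB (except for
 * unsolicited status requests), and set up the associated QDIO request */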
640 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
641 u32 fsf_cmd, u32 sbtype,
642 mempool_t *pool)
643 {
644 struct zfcp_adapter *adapter = qdio->adapter;
645 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
646
647 if (unlikely(!req))
648 return ERR_PTR(-ENOMEM);
649
650 if (adapter->req_no == 0)
651 adapter->req_no++;
652
653 INIT_LIST_HEAD(&req->list);
654 init_timer(&req->timer);
655 init_completion(&req->completion);
656
657 req->adapter = adapter;
658 req->fsf_command = fsf_cmd;
659 req->req_id = adapter->req_no;
660
661 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
662 if (likely(pool))
663 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
664 else
665 req->qtcb = zfcp_qtcb_alloc(NULL);
666
667 if (unlikely(!req->qtcb)) {
668 zfcp_fsf_req_free(req);
669 return ERR_PTR(-ENOMEM);
670 }
671
672 req->seq_no = adapter->fsf_req_seq_no;
673 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
674 req->qtcb->prefix.req_id = req->req_id;
675 req->qtcb->prefix.ulp_info = 26;
676 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
677 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
678 req->qtcb->header.req_handle = req->req_id;
679 req->qtcb->header.fsf_command = req->fsf_command;
680 }
681
682 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
683 req->qtcb, sizeof(struct fsf_qtcb));
684
685 return req;
686 }
687
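/* register the request in the adapter's request list and hand it to QDIO;
 * on send failure remove it again and reopen the adapter */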
688 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
689 {
690 struct zfcp_adapter *adapter = req->adapter;
691 struct zfcp_qdio *qdio = adapter->qdio;
692 int with_qtcb = (req->qtcb != NULL);
693 int req_id = req->req_id;
694
695 zfcp_reqlist_add(adapter->req_list, req);
696
697 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
698 req->issued = get_clock();
699 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
700 del_timer(&req->timer);
701 /* lookup request again, list might have changed */
702 zfcp_reqlist_find_rm(adapter->req_list, req_id);
703 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
704 return -EIO;
705 }
706
707 /* Don't increase for unsolicited status */
708 if (with_qtcb)
709 adapter->fsf_req_seq_no++;
710 adapter->req_no++;
711
712 return 0;
713 }
714
715 /**
716 * zfcp_fsf_status_read - send status read request
717 * @qdio: pointer to struct zfcp_qdio
719 * Returns: 0 on success, ERROR otherwise
720 */
721 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
722 {
723 struct zfcp_adapter *adapter = qdio->adapter;
724 struct zfcp_fsf_req *req;
725 struct fsf_status_read_buffer *sr_buf;
726 int retval = -EIO;
727
728 spin_lock_irq(&qdio->req_q_lock);
729 if (zfcp_qdio_sbal_get(qdio))
730 goto out;
731
732 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
733 adapter->pool.status_read_req);
734 if (IS_ERR(req)) {
735 retval = PTR_ERR(req);
736 goto out;
737 }
738
739 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
740 if (!sr_buf) {
741 retval = -ENOMEM;
742 goto failed_buf;
743 }
744 memset(sr_buf, 0, sizeof(*sr_buf));
745 req->data = sr_buf;
746
747 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
748 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
749
750 retval = zfcp_fsf_req_send(req);
751 if (retval)
752 goto failed_req_send;
753
754 goto out;
755
756 failed_req_send:
757 mempool_free(sr_buf, adapter->pool.status_read_data);
758 failed_buf:
759 zfcp_fsf_req_free(req);
760 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
761 out:
762 spin_unlock_irq(&qdio->req_q_lock);
763 return retval;
764 }
765
766 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
767 {
768 struct scsi_device *sdev = req->data;
769 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
770 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
771
772 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
773 return;
774
775 switch (req->qtcb->header.fsf_status) {
776 case FSF_PORT_HANDLE_NOT_VALID:
777 if (fsq->word[0] == fsq->word[1]) {
778 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
779 "fsafch1", req);
780 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
781 }
782 break;
783 case FSF_LUN_HANDLE_NOT_VALID:
784 if (fsq->word[0] == fsq->word[1]) {
785 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
786 req);
787 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
788 }
789 break;
790 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
791 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
792 break;
793 case FSF_PORT_BOXED:
794 zfcp_erp_set_port_status(zfcp_sdev->port,
795 ZFCP_STATUS_COMMON_ACCESS_BOXED);
796 zfcp_erp_port_reopen(zfcp_sdev->port,
797 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
798 req);
799 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
800 break;
801 case FSF_LUN_BOXED:
802 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
803 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
804 "fsafch4", req);
805 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
806 break;
807 case FSF_ADAPTER_STATUS_AVAILABLE:
808 switch (fsq->word[0]) {
809 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
810 zfcp_fc_test_link(zfcp_sdev->port);
811 /* fall through */
812 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
813 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
814 break;
815 }
816 break;
817 case FSF_GOOD:
818 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
819 break;
820 }
821 }
822
823 /**
824 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
825 * @scmnd: The SCSI command to abort
826 * Returns: pointer to struct zfcp_fsf_req
827 */
828
829 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
830 {
831 struct zfcp_fsf_req *req = NULL;
832 struct scsi_device *sdev = scmnd->device;
833 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
834 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
835 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
836
837 spin_lock_irq(&qdio->req_q_lock);
838 if (zfcp_qdio_sbal_get(qdio))
839 goto out;
840 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
841 SBAL_FLAGS0_TYPE_READ,
842 qdio->adapter->pool.scsi_abort);
843 if (IS_ERR(req)) {
844 req = NULL;
845 goto out;
846 }
847
848 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
849 ZFCP_STATUS_COMMON_UNBLOCKED)))
850 goto out_error_free;
851
852 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
853
854 req->data = sdev;
855 req->handler = zfcp_fsf_abort_fcp_command_handler;
856 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
857 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
858 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
859
860 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
861 if (!zfcp_fsf_req_send(req))
862 goto out;
863
864 out_error_free:
865 zfcp_fsf_req_free(req);
866 req = NULL;
867 out:
868 spin_unlock_irq(&qdio->req_q_lock);
869 return req;
870 }
871
872 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
873 {
874 struct zfcp_adapter *adapter = req->adapter;
875 struct zfcp_fsf_ct_els *ct = req->data;
876 struct fsf_qtcb_header *header = &req->qtcb->header;
877
878 ct->status = -EINVAL;
879
880 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
881 goto skip_fsfstatus;
882
883 switch (header->fsf_status) {
884 case FSF_GOOD:
885 zfcp_dbf_san_ct_response(req);
886 ct->status = 0;
887 break;
888 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
889 zfcp_fsf_class_not_supp(req);
890 break;
891 case FSF_ADAPTER_STATUS_AVAILABLE:
892 switch (header->fsf_status_qual.word[0]){
893 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
894 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
895 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
896 break;
897 }
898 break;
899 case FSF_ACCESS_DENIED:
900 break;
901 case FSF_PORT_BOXED:
902 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
903 break;
904 case FSF_PORT_HANDLE_NOT_VALID:
905 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
906 /* fall through */
907 case FSF_GENERIC_COMMAND_REJECTED:
908 case FSF_PAYLOAD_SIZE_MISMATCH:
909 case FSF_REQUEST_SIZE_TOO_LARGE:
910 case FSF_RESPONSE_SIZE_TOO_LARGE:
911 case FSF_SBAL_MISMATCH:
912 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
913 break;
914 }
915
916 skip_fsfstatus:
917 if (ct->handler)
918 ct->handler(ct->handler_data);
919 }
920
921 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
922 struct zfcp_qdio_req *q_req,
923 struct scatterlist *sg_req,
924 struct scatterlist *sg_resp)
925 {
926 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
927 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
928 zfcp_qdio_set_sbale_last(qdio, q_req);
929 }
930
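/* map the CT/ELS request and response scatterlists onto SBALEs; without
 * chained-SBAL support each of them must fit into a single SBALE */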
931 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
932 struct scatterlist *sg_req,
933 struct scatterlist *sg_resp)
934 {
935 struct zfcp_adapter *adapter = req->adapter;
936 u32 feat = adapter->adapter_features;
937 int bytes;
938
939 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
940 if (!zfcp_qdio_sg_one_sbale(sg_req) ||
941 !zfcp_qdio_sg_one_sbale(sg_resp))
942 return -EOPNOTSUPP;
943
944 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
945 sg_req, sg_resp);
946 return 0;
947 }
948
949 /* use single, unchained SBAL if it can hold the request */
950 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
951 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
952 sg_req, sg_resp);
953 return 0;
954 }
955
956 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
957 if (bytes <= 0)
958 return -EIO;
959 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
960 req->qtcb->bottom.support.req_buf_length = bytes;
961 zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
962
963 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
964 sg_resp);
965 req->qtcb->bottom.support.resp_buf_length = bytes;
966 if (bytes <= 0)
967 return -EIO;
968 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
969
970 return 0;
971 }
972
973 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
974 struct scatterlist *sg_req,
975 struct scatterlist *sg_resp,
976 unsigned int timeout)
977 {
978 int ret;
979
980 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
981 if (ret)
982 return ret;
983
984 /* common settings for ct/gs and els requests */
985 if (timeout > 255)
986 timeout = 255; /* max value accepted by hardware */
987 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
988 req->qtcb->bottom.support.timeout = timeout;
989 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
990
991 return 0;
992 }
993
994 /**
995 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
996 * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
997 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
998 */
999 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1000 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1001 unsigned int timeout)
1002 {
1003 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1004 struct zfcp_fsf_req *req;
1005 int ret = -EIO;
1006
1007 spin_lock_irq(&qdio->req_q_lock);
1008 if (zfcp_qdio_sbal_get(qdio))
1009 goto out;
1010
1011 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1012 SBAL_FLAGS0_TYPE_WRITE_READ, pool);
1013
1014 if (IS_ERR(req)) {
1015 ret = PTR_ERR(req);
1016 goto out;
1017 }
1018
1019 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1020 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1021 if (ret)
1022 goto failed_send;
1023
1024 req->handler = zfcp_fsf_send_ct_handler;
1025 req->qtcb->header.port_handle = wka_port->handle;
1026 req->data = ct;
1027
1028 zfcp_dbf_san_ct_request(req, wka_port->d_id);
1029
1030 ret = zfcp_fsf_req_send(req);
1031 if (ret)
1032 goto failed_send;
1033
1034 goto out;
1035
1036 failed_send:
1037 zfcp_fsf_req_free(req);
1038 out:
1039 spin_unlock_irq(&qdio->req_q_lock);
1040 return ret;
1041 }
1042
1043 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1044 {
1045 struct zfcp_fsf_ct_els *send_els = req->data;
1046 struct zfcp_port *port = send_els->port;
1047 struct fsf_qtcb_header *header = &req->qtcb->header;
1048
1049 send_els->status = -EINVAL;
1050
1051 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1052 goto skip_fsfstatus;
1053
1054 switch (header->fsf_status) {
1055 case FSF_GOOD:
1056 zfcp_dbf_san_els_response(req);
1057 send_els->status = 0;
1058 break;
1059 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1060 zfcp_fsf_class_not_supp(req);
1061 break;
1062 case FSF_ADAPTER_STATUS_AVAILABLE:
1063 switch (header->fsf_status_qual.word[0]){
1064 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1065 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1066 case FSF_SQ_RETRY_IF_POSSIBLE:
1067 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1068 break;
1069 }
1070 break;
1071 case FSF_ELS_COMMAND_REJECTED:
1072 case FSF_PAYLOAD_SIZE_MISMATCH:
1073 case FSF_REQUEST_SIZE_TOO_LARGE:
1074 case FSF_RESPONSE_SIZE_TOO_LARGE:
1075 break;
1076 case FSF_ACCESS_DENIED:
1077 if (port) {
1078 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1079 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1080 }
1081 break;
1082 case FSF_SBAL_MISMATCH:
1083 /* should never occur, avoided in zfcp_fsf_send_els */
1084 /* fall through */
1085 default:
1086 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1087 break;
1088 }
1089 skip_fsfstatus:
1090 if (send_els->handler)
1091 send_els->handler(send_els->handler_data);
1092 }
1093
1094 /**
1095 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1096 * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1097 */
1098 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1099 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1100 {
1101 struct zfcp_fsf_req *req;
1102 struct zfcp_qdio *qdio = adapter->qdio;
1103 int ret = -EIO;
1104
1105 spin_lock_irq(&qdio->req_q_lock);
1106 if (zfcp_qdio_sbal_get(qdio))
1107 goto out;
1108
1109 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1110 SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
1111
1112 if (IS_ERR(req)) {
1113 ret = PTR_ERR(req);
1114 goto out;
1115 }
1116
1117 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1118
1119 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1120
1121 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1122
1123 if (ret)
1124 goto failed_send;
1125
1126 hton24(req->qtcb->bottom.support.d_id, d_id);
1127 req->handler = zfcp_fsf_send_els_handler;
1128 req->data = els;
1129
1130 zfcp_dbf_san_els_request(req);
1131
1132 ret = zfcp_fsf_req_send(req);
1133 if (ret)
1134 goto failed_send;
1135
1136 goto out;
1137
1138 failed_send:
1139 zfcp_fsf_req_free(req);
1140 out:
1141 spin_unlock_irq(&qdio->req_q_lock);
1142 return ret;
1143 }
1144
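/**
 * zfcp_fsf_exchange_config_data - send exchange config data request
 * @erp_action: ERP action for the adapter whose configuration is requested
 * Returns: 0 on success, error otherwise
 */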
1145 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1146 {
1147 struct zfcp_fsf_req *req;
1148 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1149 int retval = -EIO;
1150
1151 spin_lock_irq(&qdio->req_q_lock);
1152 if (zfcp_qdio_sbal_get(qdio))
1153 goto out;
1154
1155 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1156 SBAL_FLAGS0_TYPE_READ,
1157 qdio->adapter->pool.erp_req);
1158
1159 if (IS_ERR(req)) {
1160 retval = PTR_ERR(req);
1161 goto out;
1162 }
1163
1164 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1165 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1166
1167 req->qtcb->bottom.config.feature_selection =
1168 FSF_FEATURE_CFDC |
1169 FSF_FEATURE_LUN_SHARING |
1170 FSF_FEATURE_NOTIFICATION_LOST |
1171 FSF_FEATURE_UPDATE_ALERT;
1172 req->erp_action = erp_action;
1173 req->handler = zfcp_fsf_exchange_config_data_handler;
1174 erp_action->fsf_req_id = req->req_id;
1175
1176 zfcp_fsf_start_erp_timer(req);
1177 retval = zfcp_fsf_req_send(req);
1178 if (retval) {
1179 zfcp_fsf_req_free(req);
1180 erp_action->fsf_req_id = 0;
1181 }
1182 out:
1183 spin_unlock_irq(&qdio->req_q_lock);
1184 return retval;
1185 }
1186
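/**
 * zfcp_fsf_exchange_config_data_sync - request configuration information
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_config
 * Returns: 0 on success, error otherwise
 */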
1187 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1188 struct fsf_qtcb_bottom_config *data)
1189 {
1190 struct zfcp_fsf_req *req = NULL;
1191 int retval = -EIO;
1192
1193 spin_lock_irq(&qdio->req_q_lock);
1194 if (zfcp_qdio_sbal_get(qdio))
1195 goto out_unlock;
1196
1197 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1198 SBAL_FLAGS0_TYPE_READ, NULL);
1199
1200 if (IS_ERR(req)) {
1201 retval = PTR_ERR(req);
1202 goto out_unlock;
1203 }
1204
1205 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1206 req->handler = zfcp_fsf_exchange_config_data_handler;
1207
1208 req->qtcb->bottom.config.feature_selection =
1209 FSF_FEATURE_CFDC |
1210 FSF_FEATURE_LUN_SHARING |
1211 FSF_FEATURE_NOTIFICATION_LOST |
1212 FSF_FEATURE_UPDATE_ALERT;
1213
1214 if (data)
1215 req->data = data;
1216
1217 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1218 retval = zfcp_fsf_req_send(req);
1219 spin_unlock_irq(&qdio->req_q_lock);
1220 if (!retval)
1221 wait_for_completion(&req->completion);
1222
1223 zfcp_fsf_req_free(req);
1224 return retval;
1225
1226 out_unlock:
1227 spin_unlock_irq(&qdio->req_q_lock);
1228 return retval;
1229 }
1230
1231 /**
1232 * zfcp_fsf_exchange_port_data - request information about local port
1233 * @erp_action: ERP action for the adapter for which port data is requested
1234 * Returns: 0 on success, error otherwise
1235 */
1236 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1237 {
1238 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1239 struct zfcp_fsf_req *req;
1240 int retval = -EIO;
1241
1242 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1243 return -EOPNOTSUPP;
1244
1245 spin_lock_irq(&qdio->req_q_lock);
1246 if (zfcp_qdio_sbal_get(qdio))
1247 goto out;
1248
1249 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1250 SBAL_FLAGS0_TYPE_READ,
1251 qdio->adapter->pool.erp_req);
1252
1253 if (IS_ERR(req)) {
1254 retval = PTR_ERR(req);
1255 goto out;
1256 }
1257
1258 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1259 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1260
1261 req->handler = zfcp_fsf_exchange_port_data_handler;
1262 req->erp_action = erp_action;
1263 erp_action->fsf_req_id = req->req_id;
1264
1265 zfcp_fsf_start_erp_timer(req);
1266 retval = zfcp_fsf_req_send(req);
1267 if (retval) {
1268 zfcp_fsf_req_free(req);
1269 erp_action->fsf_req_id = 0;
1270 }
1271 out:
1272 spin_unlock_irq(&qdio->req_q_lock);
1273 return retval;
1274 }
1275
1276 /**
1277 * zfcp_fsf_exchange_port_data_sync - request information about local port
1278 * @qdio: pointer to struct zfcp_qdio
1279 * @data: pointer to struct fsf_qtcb_bottom_port
1280 * Returns: 0 on success, error otherwise
1281 */
1282 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1283 struct fsf_qtcb_bottom_port *data)
1284 {
1285 struct zfcp_fsf_req *req = NULL;
1286 int retval = -EIO;
1287
1288 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1289 return -EOPNOTSUPP;
1290
1291 spin_lock_irq(&qdio->req_q_lock);
1292 if (zfcp_qdio_sbal_get(qdio))
1293 goto out_unlock;
1294
1295 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1296 SBAL_FLAGS0_TYPE_READ, NULL);
1297
1298 if (IS_ERR(req)) {
1299 retval = PTR_ERR(req);
1300 goto out_unlock;
1301 }
1302
1303 if (data)
1304 req->data = data;
1305
1306 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1307
1308 req->handler = zfcp_fsf_exchange_port_data_handler;
1309 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1310 retval = zfcp_fsf_req_send(req);
1311 spin_unlock_irq(&qdio->req_q_lock);
1312
1313 if (!retval)
1314 wait_for_completion(&req->completion);
1315
1316 zfcp_fsf_req_free(req);
1317
1318 return retval;
1319
1320 out_unlock:
1321 spin_unlock_irq(&qdio->req_q_lock);
1322 return retval;
1323 }
1324
1325 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1326 {
1327 struct zfcp_port *port = req->data;
1328 struct fsf_qtcb_header *header = &req->qtcb->header;
1329 struct fc_els_flogi *plogi;
1330
1331 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1332 goto out;
1333
1334 switch (header->fsf_status) {
1335 case FSF_PORT_ALREADY_OPEN:
1336 break;
1337 case FSF_ACCESS_DENIED:
1338 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1339 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1340 break;
1341 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1342 dev_warn(&req->adapter->ccw_device->dev,
1343 "Not enough FCP adapter resources to open "
1344 "remote port 0x%016Lx\n",
1345 (unsigned long long)port->wwpn);
1346 zfcp_erp_set_port_status(port,
1347 ZFCP_STATUS_COMMON_ERP_FAILED);
1348 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1349 break;
1350 case FSF_ADAPTER_STATUS_AVAILABLE:
1351 switch (header->fsf_status_qual.word[0]) {
1352 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1353 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1354 case FSF_SQ_NO_RETRY_POSSIBLE:
1355 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1356 break;
1357 }
1358 break;
1359 case FSF_GOOD:
1360 port->handle = header->port_handle;
1361 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1362 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1363 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1364 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1365 &port->status);
1366 /* check whether D_ID has changed during open */
1367 /*
1368 * FIXME: This check is not airtight, as the FCP channel does
1369 * not monitor closures of target port connections caused on
1370 * the remote side. Thus, they might miss out on invalidating
1371 * locally cached WWPNs (and other N_Port parameters) of gone
1372 * target ports. So, our heroic attempt to make things safe
1373 * could be undermined by 'open port' response data tagged with
1374 * obsolete WWPNs. Another reason to monitor potential
1375 * connection closures ourselves at least (by interpreting
1376 * incoming ELS' and unsolicited status). It just crosses my
1377 * mind that one should be able to cross-check by means of
1378 * another GID_PN straight after a port has been opened.
1379 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1380 */
1381 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1382 if (req->qtcb->bottom.support.els1_length >=
1383 FSF_PLOGI_MIN_LEN)
1384 zfcp_fc_plogi_evaluate(port, plogi);
1385 break;
1386 case FSF_UNKNOWN_OP_SUBTYPE:
1387 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1388 break;
1389 }
1390
1391 out:
1392 put_device(&port->dev);
1393 }
1394
1395 /**
1396 * zfcp_fsf_open_port - create and send open port request
1397 * @erp_action: pointer to struct zfcp_erp_action
1398 * Returns: 0 on success, error otherwise
1399 */
1400 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1401 {
1402 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1403 struct zfcp_port *port = erp_action->port;
1404 struct zfcp_fsf_req *req;
1405 int retval = -EIO;
1406
1407 spin_lock_irq(&qdio->req_q_lock);
1408 if (zfcp_qdio_sbal_get(qdio))
1409 goto out;
1410
1411 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1412 SBAL_FLAGS0_TYPE_READ,
1413 qdio->adapter->pool.erp_req);
1414
1415 if (IS_ERR(req)) {
1416 retval = PTR_ERR(req);
1417 goto out;
1418 }
1419
1420 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1421 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1422
1423 req->handler = zfcp_fsf_open_port_handler;
1424 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1425 req->data = port;
1426 req->erp_action = erp_action;
1427 erp_action->fsf_req_id = req->req_id;
1428 get_device(&port->dev);
1429
1430 zfcp_fsf_start_erp_timer(req);
1431 retval = zfcp_fsf_req_send(req);
1432 if (retval) {
1433 zfcp_fsf_req_free(req);
1434 erp_action->fsf_req_id = 0;
1435 put_device(&port->dev);
1436 }
1437 out:
1438 spin_unlock_irq(&qdio->req_q_lock);
1439 return retval;
1440 }
1441
1442 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1443 {
1444 struct zfcp_port *port = req->data;
1445
1446 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1447 return;
1448
1449 switch (req->qtcb->header.fsf_status) {
1450 case FSF_PORT_HANDLE_NOT_VALID:
1451 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1452 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1453 break;
1454 case FSF_ADAPTER_STATUS_AVAILABLE:
1455 break;
1456 case FSF_GOOD:
1457 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1458 break;
1459 }
1460 }
1461
1462 /**
1463 * zfcp_fsf_close_port - create and send close port request
1464 * @erp_action: pointer to struct zfcp_erp_action
1465 * Returns: 0 on success, error otherwise
1466 */
1467 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1468 {
1469 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1470 struct zfcp_fsf_req *req;
1471 int retval = -EIO;
1472
1473 spin_lock_irq(&qdio->req_q_lock);
1474 if (zfcp_qdio_sbal_get(qdio))
1475 goto out;
1476
1477 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1478 SBAL_FLAGS0_TYPE_READ,
1479 qdio->adapter->pool.erp_req);
1480
1481 if (IS_ERR(req)) {
1482 retval = PTR_ERR(req);
1483 goto out;
1484 }
1485
1486 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1487 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1488
1489 req->handler = zfcp_fsf_close_port_handler;
1490 req->data = erp_action->port;
1491 req->erp_action = erp_action;
1492 req->qtcb->header.port_handle = erp_action->port->handle;
1493 erp_action->fsf_req_id = req->req_id;
1494
1495 zfcp_fsf_start_erp_timer(req);
1496 retval = zfcp_fsf_req_send(req);
1497 if (retval) {
1498 zfcp_fsf_req_free(req);
1499 erp_action->fsf_req_id = 0;
1500 }
1501 out:
1502 spin_unlock_irq(&qdio->req_q_lock);
1503 return retval;
1504 }
1505
1506 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1507 {
1508 struct zfcp_fc_wka_port *wka_port = req->data;
1509 struct fsf_qtcb_header *header = &req->qtcb->header;
1510
1511 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1512 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1513 goto out;
1514 }
1515
1516 switch (header->fsf_status) {
1517 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1518 dev_warn(&req->adapter->ccw_device->dev,
1519 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1520 /* fall through */
1521 case FSF_ADAPTER_STATUS_AVAILABLE:
1522 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1523 /* fall through */
1524 case FSF_ACCESS_DENIED:
1525 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1526 break;
1527 case FSF_GOOD:
1528 wka_port->handle = header->port_handle;
1529 /* fall through */
1530 case FSF_PORT_ALREADY_OPEN:
1531 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1532 }
1533 out:
1534 wake_up(&wka_port->completion_wq);
1535 }
1536
1537 /**
1538 * zfcp_fsf_open_wka_port - create and send open wka-port request
1539 * @wka_port: pointer to struct zfcp_fc_wka_port
1540 * Returns: 0 on success, error otherwise
1541 */
1542 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1543 {
1544 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1545 struct zfcp_fsf_req *req;
1546 int retval = -EIO;
1547
1548 spin_lock_irq(&qdio->req_q_lock);
1549 if (zfcp_qdio_sbal_get(qdio))
1550 goto out;
1551
1552 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1553 SBAL_FLAGS0_TYPE_READ,
1554 qdio->adapter->pool.erp_req);
1555
1556 if (unlikely(IS_ERR(req))) {
1557 retval = PTR_ERR(req);
1558 goto out;
1559 }
1560
1561 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1562 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1563
1564 req->handler = zfcp_fsf_open_wka_port_handler;
1565 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1566 req->data = wka_port;
1567
1568 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1569 retval = zfcp_fsf_req_send(req);
1570 if (retval)
1571 zfcp_fsf_req_free(req);
1572 out:
1573 spin_unlock_irq(&qdio->req_q_lock);
1574 return retval;
1575 }
1576
1577 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1578 {
1579 struct zfcp_fc_wka_port *wka_port = req->data;
1580
1581 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1582 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1583 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1584 }
1585
1586 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1587 wake_up(&wka_port->completion_wq);
1588 }
1589
1590 /**
1591 * zfcp_fsf_close_wka_port - create and send close wka port request
1592 * @wka_port: WKA port to close
1593 * Returns: 0 on success, error otherwise
1594 */
1595 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1596 {
1597 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1598 struct zfcp_fsf_req *req;
1599 int retval = -EIO;
1600
1601 spin_lock_irq(&qdio->req_q_lock);
1602 if (zfcp_qdio_sbal_get(qdio))
1603 goto out;
1604
1605 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1606 SBAL_FLAGS0_TYPE_READ,
1607 qdio->adapter->pool.erp_req);
1608
1609 if (unlikely(IS_ERR(req))) {
1610 retval = PTR_ERR(req);
1611 goto out;
1612 }
1613
1614 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1615 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1616
1617 req->handler = zfcp_fsf_close_wka_port_handler;
1618 req->data = wka_port;
1619 req->qtcb->header.port_handle = wka_port->handle;
1620
1621 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1622 retval = zfcp_fsf_req_send(req);
1623 if (retval)
1624 zfcp_fsf_req_free(req);
1625 out:
1626 spin_unlock_irq(&qdio->req_q_lock);
1627 return retval;
1628 }
1629
1630 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1631 {
1632 struct zfcp_port *port = req->data;
1633 struct fsf_qtcb_header *header = &req->qtcb->header;
1634 struct scsi_device *sdev;
1635
1636 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1637 return;
1638
1639 switch (header->fsf_status) {
1640 case FSF_PORT_HANDLE_NOT_VALID:
1641 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1642 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1643 break;
1644 case FSF_ACCESS_DENIED:
1645 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1646 break;
1647 case FSF_PORT_BOXED:
1648 /* can't use generic zfcp_erp_modify_port_status because
1649 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1650 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1651 shost_for_each_device(sdev, port->adapter->scsi_host)
1652 if (sdev_to_zfcp(sdev)->port == port)
1653 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1654 &sdev_to_zfcp(sdev)->status);
1655 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1656 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1657 "fscpph2", req);
1658 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1659 break;
1660 case FSF_ADAPTER_STATUS_AVAILABLE:
1661 switch (header->fsf_status_qual.word[0]) {
1662 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1663 /* fall through */
1664 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1665 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1666 break;
1667 }
1668 break;
1669 case FSF_GOOD:
1670 /* can't use generic zfcp_erp_modify_port_status because
1671 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1672 */
1673 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1674 shost_for_each_device(sdev, port->adapter->scsi_host)
1675 if (sdev_to_zfcp(sdev)->port == port)
1676 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1677 &sdev_to_zfcp(sdev)->status);
1678 break;
1679 }
1680 }
1681
1682 /**
1683 * zfcp_fsf_close_physical_port - close physical port
1684 * @erp_action: pointer to struct zfcp_erp_action
1685 * Returns: 0 on success
1686 */
1687 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1688 {
1689 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1690 struct zfcp_fsf_req *req;
1691 int retval = -EIO;
1692
1693 spin_lock_irq(&qdio->req_q_lock);
1694 if (zfcp_qdio_sbal_get(qdio))
1695 goto out;
1696
1697 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1698 SBAL_FLAGS0_TYPE_READ,
1699 qdio->adapter->pool.erp_req);
1700
1701 if (IS_ERR(req)) {
1702 retval = PTR_ERR(req);
1703 goto out;
1704 }
1705
1706 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1707 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1708
1709 req->data = erp_action->port;
1710 req->qtcb->header.port_handle = erp_action->port->handle;
1711 req->erp_action = erp_action;
1712 req->handler = zfcp_fsf_close_physical_port_handler;
1713 erp_action->fsf_req_id = req->req_id;
1714
1715 zfcp_fsf_start_erp_timer(req);
1716 retval = zfcp_fsf_req_send(req);
1717 if (retval) {
1718 zfcp_fsf_req_free(req);
1719 erp_action->fsf_req_id = 0;
1720 }
1721 out:
1722 spin_unlock_irq(&qdio->req_q_lock);
1723 return retval;
1724 }
1725
1726 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1727 {
1728 struct zfcp_adapter *adapter = req->adapter;
1729 struct scsi_device *sdev = req->data;
1730 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1731 struct fsf_qtcb_header *header = &req->qtcb->header;
1732 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1733
1734 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1735 return;
1736
1737 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1738 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1739 ZFCP_STATUS_LUN_SHARED |
1740 ZFCP_STATUS_LUN_READONLY,
1741 &zfcp_sdev->status);
1742
1743 switch (header->fsf_status) {
1744
1745 case FSF_PORT_HANDLE_NOT_VALID:
1746 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
1747 /* fall through */
1748 case FSF_LUN_ALREADY_OPEN:
1749 break;
1750 case FSF_ACCESS_DENIED:
1751 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1752 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1753 break;
1754 case FSF_PORT_BOXED:
1755 zfcp_erp_set_port_status(zfcp_sdev->port,
1756 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1757 zfcp_erp_port_reopen(zfcp_sdev->port,
1758 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
1759 req);
1760 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1761 break;
1762 case FSF_LUN_SHARING_VIOLATION:
1763 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1764 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1765 break;
1766 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1767 dev_warn(&adapter->ccw_device->dev,
1768 "No handle is available for LUN "
1769 "0x%016Lx on port 0x%016Lx\n",
1770 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1771 (unsigned long long)zfcp_sdev->port->wwpn);
1772 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1773 /* fall through */
1774 case FSF_INVALID_COMMAND_OPTION:
1775 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1776 break;
1777 case FSF_ADAPTER_STATUS_AVAILABLE:
1778 switch (header->fsf_status_qual.word[0]) {
1779 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1780 zfcp_fc_test_link(zfcp_sdev->port);
1781 /* fall through */
1782 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1783 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1784 break;
1785 }
1786 break;
1787
1788 case FSF_GOOD:
1789 zfcp_sdev->lun_handle = header->lun_handle;
1790 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1791 zfcp_cfdc_open_lun_eval(sdev, bottom);
1792 break;
1793 }
1794 }
1795
1796 /**
1797 * zfcp_fsf_open_lun - open LUN
1798 * @erp_action: pointer to struct zfcp_erp_action
1799 * Returns: 0 on success, error otherwise
1800 */
1801 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1802 {
1803 struct zfcp_adapter *adapter = erp_action->adapter;
1804 struct zfcp_qdio *qdio = adapter->qdio;
1805 struct zfcp_fsf_req *req;
1806 int retval = -EIO;
1807
1808 spin_lock_irq(&qdio->req_q_lock);
1809 if (zfcp_qdio_sbal_get(qdio))
1810 goto out;
1811
1812 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1813 SBAL_FLAGS0_TYPE_READ,
1814 adapter->pool.erp_req);
1815
1816 if (IS_ERR(req)) {
1817 retval = PTR_ERR(req);
1818 goto out;
1819 }
1820
1821 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1822 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1823
1824 req->qtcb->header.port_handle = erp_action->port->handle;
1825 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1826 req->handler = zfcp_fsf_open_lun_handler;
1827 req->data = erp_action->sdev;
1828 req->erp_action = erp_action;
1829 erp_action->fsf_req_id = req->req_id;
1830
1831 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1832 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1833
1834 zfcp_fsf_start_erp_timer(req);
1835 retval = zfcp_fsf_req_send(req);
1836 if (retval) {
1837 zfcp_fsf_req_free(req);
1838 erp_action->fsf_req_id = 0;
1839 }
1840 out:
1841 spin_unlock_irq(&qdio->req_q_lock);
1842 return retval;
1843 }
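/*
 * Usage sketch only: zfcp_fsf_open_lun() is meant to be driven by the
 * error recovery (ERP) engine rather than called directly.  The caller
 * and the return-code mapping below are assumptions about the usual
 * zfcp_erp.c strategy pattern, not something this file guarantees:
 *
 *	retval = zfcp_fsf_open_lun(erp_action);
 *	if (retval == -ENOMEM)
 *		return ZFCP_ERP_NOMEM;
 *	return retval ? ZFCP_ERP_FAILED : ZFCP_ERP_CONTINUES;
 *
 * On success the request completes asynchronously and the result is
 * evaluated by zfcp_fsf_open_lun_handler() above.
 */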
1844
1845 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1846 {
1847 struct scsi_device *sdev = req->data;
1848 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1849
1850 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1851 return;
1852
1853 switch (req->qtcb->header.fsf_status) {
1854 case FSF_PORT_HANDLE_NOT_VALID:
1855 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
1856 req);
1857 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1858 break;
1859 case FSF_LUN_HANDLE_NOT_VALID:
1860 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
1861 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1862 break;
1863 case FSF_PORT_BOXED:
1864 zfcp_erp_set_port_status(zfcp_sdev->port,
1865 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1866 zfcp_erp_port_reopen(zfcp_sdev->port,
1867 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
1868 req);
1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1870 break;
1871 case FSF_ADAPTER_STATUS_AVAILABLE:
1872 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1873 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1874 zfcp_fc_test_link(zfcp_sdev->port);
1875 /* fall through */
1876 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1877 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1878 break;
1879 }
1880 break;
1881 case FSF_GOOD:
1882 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1883 break;
1884 }
1885 }
1886
1887 /**
1888 * zfcp_fsf_close_lun - close LUN
1889 * @erp_action: pointer to erp_action triggering the "close LUN"
1890 * Returns: 0 on success, error otherwise
1891 */
1892 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1893 {
1894 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1895 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1896 struct zfcp_fsf_req *req;
1897 int retval = -EIO;
1898
1899 spin_lock_irq(&qdio->req_q_lock);
1900 if (zfcp_qdio_sbal_get(qdio))
1901 goto out;
1902
1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1904 SBAL_FLAGS0_TYPE_READ,
1905 qdio->adapter->pool.erp_req);
1906
1907 if (IS_ERR(req)) {
1908 retval = PTR_ERR(req);
1909 goto out;
1910 }
1911
1912 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1913 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1914
1915 req->qtcb->header.port_handle = erp_action->port->handle;
1916 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1917 req->handler = zfcp_fsf_close_lun_handler;
1918 req->data = erp_action->sdev;
1919 req->erp_action = erp_action;
1920 erp_action->fsf_req_id = req->req_id;
1921
1922 zfcp_fsf_start_erp_timer(req);
1923 retval = zfcp_fsf_req_send(req);
1924 if (retval) {
1925 zfcp_fsf_req_free(req);
1926 erp_action->fsf_req_id = 0;
1927 }
1928 out:
1929 spin_unlock_irq(&qdio->req_q_lock);
1930 return retval;
1931 }
1932
1933 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1934 {
1935 lat_rec->sum += lat;
1936 lat_rec->min = min(lat_rec->min, lat);
1937 lat_rec->max = max(lat_rec->max, lat);
1938 }
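/*
 * Worked example for the accumulator above: if three commands report
 * channel latencies of 3, 7 and 5 ticks, the record ends up with
 * sum = 15, min = 3 and max = 7, and the caller bumps lat->counter to 3,
 * so an average (sum / counter = 5) can be derived later.  Exposing that
 * average, e.g. through the per-LUN latency attributes in sysfs, is an
 * assumption about the consumer and not part of this helper.
 */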
1939
1940 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1941 {
1942 struct fsf_qual_latency_info *lat_in;
1943 struct latency_cont *lat = NULL;
1944 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
1945 struct zfcp_blk_drv_data blktrc;
1946 int ticks = req->adapter->timer_ticks;
1947
1948 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1949
1950 blktrc.flags = 0;
1951 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1952 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1953 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1954 blktrc.inb_usage = 0;
1955 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
1956
1957 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1958 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1959 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1960 blktrc.channel_lat = lat_in->channel_lat * ticks;
1961 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1962
1963 switch (req->qtcb->bottom.io.data_direction) {
1964 case FSF_DATADIR_DIF_READ_STRIP:
1965 case FSF_DATADIR_DIF_READ_CONVERT:
1966 case FSF_DATADIR_READ:
1967 lat = &zfcp_sdev->latencies.read;
1968 break;
1969 case FSF_DATADIR_DIF_WRITE_INSERT:
1970 case FSF_DATADIR_DIF_WRITE_CONVERT:
1971 case FSF_DATADIR_WRITE:
1972 lat = &zfcp_sdev->latencies.write;
1973 break;
1974 case FSF_DATADIR_CMND:
1975 lat = &zfcp_sdev->latencies.cmd;
1976 break;
1977 }
1978
1979 if (lat) {
1980 spin_lock(&zfcp_sdev->latencies.lock);
1981 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
1982 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
1983 lat->counter++;
1984 spin_unlock(&zfcp_sdev->latencies.lock);
1985 }
1986 }
1987
1988 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
1989 sizeof(blktrc));
1990 }
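/*
 * The struct zfcp_blk_drv_data payload attached above travels with the
 * block trace of the request.  A hypothetical userspace consumer of the
 * raw trace payload (pdu and report() are placeholders, the tooling is
 * an assumption) might check it roughly like this:
 *
 *	struct zfcp_blk_drv_data *d = pdu;
 *	if (d->magic == ZFCP_BLK_DRV_DATA_MAGIC &&
 *	    (d->flags & ZFCP_BLK_LAT_VALID))
 *		report(d->channel_lat, d->fabric_lat);
 *
 * channel_lat and fabric_lat are already scaled by the adapter's
 * timer_ticks value at this point.
 */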
1991
1992 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1993 {
1994 struct scsi_cmnd *scmnd = req->data;
1995 struct scsi_device *sdev = scmnd->device;
1996 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1997 struct fsf_qtcb_header *header = &req->qtcb->header;
1998
1999 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2000 return;
2001
2002 switch (header->fsf_status) {
2003 case FSF_HANDLE_MISMATCH:
2004 case FSF_PORT_HANDLE_NOT_VALID:
2005 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
2006 req);
2007 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2008 break;
2009 case FSF_FCPLUN_NOT_VALID:
2010 case FSF_LUN_HANDLE_NOT_VALID:
2011 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
2012 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2013 break;
2014 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2015 zfcp_fsf_class_not_supp(req);
2016 break;
2017 case FSF_ACCESS_DENIED:
2018 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2019 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2020 break;
2021 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2022 dev_err(&req->adapter->ccw_device->dev,
2023 "Incorrect direction %d, LUN 0x%016Lx on port "
2024 "0x%016Lx closed\n",
2025 req->qtcb->bottom.io.data_direction,
2026 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2027 (unsigned long long)zfcp_sdev->port->wwpn);
2028 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2029 "fssfch3", req);
2030 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2031 break;
2032 case FSF_CMND_LENGTH_NOT_VALID:
2033 dev_err(&req->adapter->ccw_device->dev,
2034 "Incorrect CDB length %d, LUN 0x%016Lx on "
2035 "port 0x%016Lx closed\n",
2036 req->qtcb->bottom.io.fcp_cmnd_length,
2037 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2038 (unsigned long long)zfcp_sdev->port->wwpn);
2039 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2040 "fssfch4", req);
2041 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2042 break;
2043 case FSF_PORT_BOXED:
2044 zfcp_erp_set_port_status(zfcp_sdev->port,
2045 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2046 zfcp_erp_port_reopen(zfcp_sdev->port,
2047 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
2048 req);
2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2050 break;
2051 case FSF_LUN_BOXED:
2052 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2053 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2054 "fssfch6", req);
2055 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2056 break;
2057 case FSF_ADAPTER_STATUS_AVAILABLE:
2058 if (header->fsf_status_qual.word[0] ==
2059 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2060 zfcp_fc_test_link(zfcp_sdev->port);
2061 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2062 break;
2063 }
2064 }
2065
2066 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2067 {
2068 struct scsi_cmnd *scpnt;
2069 struct fcp_resp_with_ext *fcp_rsp;
2070 unsigned long flags;
2071
2072 read_lock_irqsave(&req->adapter->abort_lock, flags);
2073
2074 scpnt = req->data;
2075 if (unlikely(!scpnt)) {
2076 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2077 return;
2078 }
2079
2080 zfcp_fsf_fcp_handler_common(req);
2081
2082 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2083 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2084 goto skip_fsfstatus;
2085 }
2086
2087 switch (req->qtcb->header.fsf_status) {
2088 case FSF_INCONSISTENT_PROT_DATA:
2089 case FSF_INVALID_PROT_PARM:
2090 set_host_byte(scpnt, DID_ERROR);
2091 goto skip_fsfstatus;
2092 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2093 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2094 goto skip_fsfstatus;
2095 case FSF_APP_TAG_CHECK_FAILURE:
2096 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2097 goto skip_fsfstatus;
2098 case FSF_REF_TAG_CHECK_FAILURE:
2099 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2100 goto skip_fsfstatus;
2101 }
2102 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2103 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2104
2105 skip_fsfstatus:
2106 zfcp_fsf_req_trace(req, scpnt);
2107 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
2108
2109 scpnt->host_scribble = NULL;
2110 (scpnt->scsi_done) (scpnt);
2111 /*
2112 * We must hold this lock until scsi_done has been called.
2113 * Otherwise we may call scsi_done after abort regarding this
2114 * command has completed.
2115 * Note: scsi_done must not block!
2116 */
2117 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2118 }
2119
2120 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2121 {
2122 switch (scsi_get_prot_op(scsi_cmnd)) {
2123 case SCSI_PROT_NORMAL:
2124 switch (scsi_cmnd->sc_data_direction) {
2125 case DMA_NONE:
2126 *data_dir = FSF_DATADIR_CMND;
2127 break;
2128 case DMA_FROM_DEVICE:
2129 *data_dir = FSF_DATADIR_READ;
2130 break;
2131 case DMA_TO_DEVICE:
2132 *data_dir = FSF_DATADIR_WRITE;
2133 break;
2134 case DMA_BIDIRECTIONAL:
2135 return -EINVAL;
2136 }
2137 break;
2138
2139 case SCSI_PROT_READ_STRIP:
2140 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2141 break;
2142 case SCSI_PROT_WRITE_INSERT:
2143 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2144 break;
2145 case SCSI_PROT_READ_PASS:
2146 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2147 break;
2148 case SCSI_PROT_WRITE_PASS:
2149 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2150 break;
2151 default:
2152 return -EINVAL;
2153 }
2154
2155 return 0;
2156 }
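/*
 * Mapping examples derived directly from the switch above: a plain READ
 * (SCSI_PROT_NORMAL, DMA_FROM_DEVICE) yields FSF_DATADIR_READ, a WRITE
 * with host-inserted protection data (SCSI_PROT_WRITE_INSERT) yields
 * FSF_DATADIR_DIF_WRITE_INSERT, and bidirectional or unknown protection
 * operations are rejected with -EINVAL so the caller can fail the
 * command instead of sending an FCP request with a bogus data direction.
 */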
2157
2158 /**
2159 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2160 * @scsi_cmnd: scsi command to be sent
2161 */
2162 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2163 {
2164 struct zfcp_fsf_req *req;
2165 struct fcp_cmnd *fcp_cmnd;
2166 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2167 int real_bytes, retval = -EIO, dix_bytes = 0;
2168 struct scsi_device *sdev = scsi_cmnd->device;
2169 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2170 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2171 struct zfcp_qdio *qdio = adapter->qdio;
2172 struct fsf_qtcb_bottom_io *io;
2173 unsigned long flags;
2174
2175 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2176 ZFCP_STATUS_COMMON_UNBLOCKED)))
2177 return -EBUSY;
2178
2179 spin_lock_irqsave(&qdio->req_q_lock, flags);
2180 if (atomic_read(&qdio->req_q_free) <= 0) {
2181 atomic_inc(&qdio->req_q_full);
2182 goto out;
2183 }
2184
2185 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2186 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2187
2188 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2189 sbtype, adapter->pool.scsi_req);
2190
2191 if (IS_ERR(req)) {
2192 retval = PTR_ERR(req);
2193 goto out;
2194 }
2195
2196 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2197
2198 io = &req->qtcb->bottom.io;
2199 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2200 req->data = scsi_cmnd;
2201 req->handler = zfcp_fsf_fcp_cmnd_handler;
2202 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2203 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2204 io->service_class = FSF_CLASS_3;
2205 io->fcp_cmnd_length = FCP_CMND_LEN;
2206
2207 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2208 io->data_block_length = scsi_cmnd->device->sector_size;
2209 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2210 }
2211
2212 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
	goto failed_scsi_cmnd;
2213
2214 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2215 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2216
2217 if (scsi_prot_sg_count(scsi_cmnd)) {
2218 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2219 scsi_prot_sg_count(scsi_cmnd));
2220 dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2221 scsi_prot_sglist(scsi_cmnd));
2222 io->prot_data_length = dix_bytes;
2223 }
2224
2225 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2226 scsi_sglist(scsi_cmnd));
2227
2228 if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
2229 goto failed_scsi_cmnd;
2230
2231 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2232
2233 retval = zfcp_fsf_req_send(req);
2234 if (unlikely(retval))
2235 goto failed_scsi_cmnd;
2236
2237 goto out;
2238
2239 failed_scsi_cmnd:
2240 zfcp_fsf_req_free(req);
2241 scsi_cmnd->host_scribble = NULL;
2242 out:
2243 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2244 return retval;
2245 }
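/*
 * Usage sketch, assuming the SCSI midlayer entry point in zfcp_scsi.c
 * (the exact caller is an assumption here): the queuecommand path is
 * expected to translate the return value along these lines,
 *
 *	ret = zfcp_fsf_fcp_cmnd(scpnt);
 *	if (ret == -EBUSY)
 *		return SCSI_MLQUEUE_DEVICE_BUSY;
 *	else if (ret < 0)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	return ret;
 *
 * so that a blocked LUN (-EBUSY) or a full request queue (-EIO) simply
 * makes the midlayer retry the command later.
 */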
2246
2247 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2248 {
2249 struct fcp_resp_with_ext *fcp_rsp;
2250 struct fcp_resp_rsp_info *rsp_info;
2251
2252 zfcp_fsf_fcp_handler_common(req);
2253
2254 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2255 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2256
2257 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2258 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2259 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2260 }
2261
2262 /**
2263 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2264 * @scmnd: SCSI command to send the task management command for
2265 * @tm_flags: unsigned byte for task management flags
2266 * Returns: pointer to struct zfcp_fsf_req on success, NULL otherwise
2267 */
2268 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2269 u8 tm_flags)
2270 {
2271 struct zfcp_fsf_req *req = NULL;
2272 struct fcp_cmnd *fcp_cmnd;
2273 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2274 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2275
2276 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2277 ZFCP_STATUS_COMMON_UNBLOCKED)))
2278 return NULL;
2279
2280 spin_lock_irq(&qdio->req_q_lock);
2281 if (zfcp_qdio_sbal_get(qdio))
2282 goto out;
2283
2284 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2285 SBAL_FLAGS0_TYPE_WRITE,
2286 qdio->adapter->pool.scsi_req);
2287
2288 if (IS_ERR(req)) {
2289 req = NULL;
2290 goto out;
2291 }
2292
2293 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2294 req->data = scmnd;
2295 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2296 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2297 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2298 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2299 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2300 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2301
2302 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2303
2304 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2305 zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
2306
2307 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2308 if (!zfcp_fsf_req_send(req))
2309 goto out;
2310
2311 zfcp_fsf_req_free(req);
2312 req = NULL;
2313 out:
2314 spin_unlock_irq(&qdio->req_q_lock);
2315 return req;
2316 }
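/*
 * Usage sketch for the SCSI error handlers (the caller shown here is an
 * assumption about zfcp_scsi.c, not defined in this file): a LUN or
 * target reset is expected to look roughly like
 *
 *	fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, FCP_TMF_LUN_RESET);
 *	if (!fsf_req)
 *		return FAILED;
 *	wait_for_completion(&fsf_req->completion);
 *	ret = (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) ?
 *			FAILED : SUCCESS;
 *	zfcp_fsf_req_free(fsf_req);
 *	return ret;
 *
 * FCP_TMF_LUN_RESET and FCP_TMF_TGT_RESET come from <scsi/fc/fc_fcp.h>;
 * the FAILED/SUCCESS policy is the caller's, not this function's.
 */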
2317
2318 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2319 {
2320 }
2321
2322 /**
2323 * zfcp_fsf_control_file - control file upload/download
2324 * @adapter: pointer to struct zfcp_adapter
2325 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2326 * Returns: pointer to struct zfcp_fsf_req on success, ERR_PTR otherwise
2327 */
2328 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2329 struct zfcp_fsf_cfdc *fsf_cfdc)
2330 {
2331 struct zfcp_qdio *qdio = adapter->qdio;
2332 struct zfcp_fsf_req *req = NULL;
2333 struct fsf_qtcb_bottom_support *bottom;
2334 int direction, retval = -EIO, bytes;
2335
2336 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2337 return ERR_PTR(-EOPNOTSUPP);
2338
2339 switch (fsf_cfdc->command) {
2340 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2341 direction = SBAL_FLAGS0_TYPE_WRITE;
2342 break;
2343 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2344 direction = SBAL_FLAGS0_TYPE_READ;
2345 break;
2346 default:
2347 return ERR_PTR(-EINVAL);
2348 }
2349
2350 spin_lock_irq(&qdio->req_q_lock);
2351 if (zfcp_qdio_sbal_get(qdio))
2352 goto out;
2353
2354 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
2355 if (IS_ERR(req)) {
2356 retval = -EPERM;
2357 goto out;
2358 }
2359
2360 req->handler = zfcp_fsf_control_file_handler;
2361
2362 bottom = &req->qtcb->bottom.support;
2363 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2364 bottom->option = fsf_cfdc->option;
2365
2366 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2367
2368 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2369 zfcp_fsf_req_free(req);
2370 goto out;
2371 }
2372 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2373
2374 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2375 retval = zfcp_fsf_req_send(req);
2376 out:
2377 spin_unlock_irq(&qdio->req_q_lock);
2378
2379 if (!retval) {
2380 wait_for_completion(&req->completion);
2381 return req;
2382 }
2383 return ERR_PTR(retval);
2384 }
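/*
 * Since zfcp_fsf_control_file() waits for completion itself, the caller
 * (the CFDC code in zfcp_cfdc.c is assumed here) owns the returned
 * request and must release it, roughly:
 *
 *	req = zfcp_fsf_control_file(adapter, &cfdc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... evaluate req->qtcb->header.fsf_status and the payload ...
 *	zfcp_fsf_req_free(req);
 */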
2385
2386 /**
2387 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2388 * @qdio: pointer to struct zfcp_qdio
2389 * @sbal_idx: response queue index of SBAL to be processed
2390 */
2391 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2392 {
2393 struct zfcp_adapter *adapter = qdio->adapter;
2394 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2395 struct qdio_buffer_element *sbale;
2396 struct zfcp_fsf_req *fsf_req;
2397 unsigned long req_id;
2398 int idx;
2399
2400 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2401
2402 sbale = &sbal->element[idx];
2403 req_id = (unsigned long) sbale->addr;
2404 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2405
2406 if (!fsf_req) {
2407 /*
2408 * An unknown request id points to potential memory corruption,
2409 * so the machine must be stopped immediately.
2410 */
2411 zfcp_qdio_siosl(adapter);
2412 panic("error: unknown req_id (%lx) on adapter %s.\n",
2413 req_id, dev_name(&adapter->ccw_device->dev));
2414 }
2415
2416 fsf_req->qdio_req.sbal_response = sbal_idx;
2417 zfcp_fsf_req_complete(fsf_req);
2418
2419 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
2420 break;
2421 }
2422 }
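/*
 * Sketch of the expected caller (an assumption about zfcp_qdio.c): the
 * QDIO inbound interrupt handler walks the completed response buffers
 * and hands each SBAL index to this function, roughly
 *
 *	for (sbal_no = 0; sbal_no < count; sbal_no++) {
 *		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
 *		zfcp_fsf_reqid_check(qdio, sbal_idx);
 *	}
 *
 * so every request id placed into an outbound SBALE eventually comes
 * back through zfcp_reqlist_find_rm() above.
 */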