drivers/s390/scsi/zfcp_fsf.c
1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corp. 2002, 2013
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20
21 struct kmem_cache *zfcp_fsf_qtcb_cache;
22
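/* timer callback for an expired FSF request: trigger SIOSL and reopen the adapter via ERP */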
23 static void zfcp_fsf_request_timeout_handler(unsigned long data)
24 {
25 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26 zfcp_qdio_siosl(adapter);
27 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
28 "fsrth_1");
29 }
30
31 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32 unsigned long timeout)
33 {
34 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36 fsf_req->timer.expires = jiffies + timeout;
37 add_timer(&fsf_req->timer);
38 }
39
40 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 {
42 BUG_ON(!fsf_req->erp_action);
43 fsf_req->timer.function = zfcp_erp_timeout_handler;
44 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45 fsf_req->timer.expires = jiffies + 30 * HZ;
46 add_timer(&fsf_req->timer);
47 }
48
49 /* association between FSF command and FSF QTCB type */
50 static u32 fsf_qtcb_type[] = {
51 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
52 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
59 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
60 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
61 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
62 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
63 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
64 };
65
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
67 {
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69 "operational because of an unsupported FC class\n");
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
72 }
73
74 /**
75  * zfcp_fsf_req_free - free memory used by an FSF request
76  * @req: pointer to struct zfcp_fsf_req to be freed
77 */
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
79 {
80 if (likely(req->pool)) {
81 if (likely(req->qtcb))
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83 mempool_free(req, req->pool);
84 return;
85 }
86
87 if (likely(req->qtcb))
88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89 kfree(req);
90 }
91
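/* unsolicited "port closed" status: look up the affected port by its D_ID and reopen it */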
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
93 {
94 unsigned long flags;
95 struct fsf_status_read_buffer *sr_buf = req->data;
96 struct zfcp_adapter *adapter = req->adapter;
97 struct zfcp_port *port;
98 int d_id = ntoh24(sr_buf->d_id);
99
100 read_lock_irqsave(&adapter->port_list_lock, flags);
101 list_for_each_entry(port, &adapter->port_list, list)
102 if (port->d_id == d_id) {
103 zfcp_erp_port_reopen(port, 0, "fssrpc1");
104 break;
105 }
106 read_unlock_irqrestore(&adapter->port_list_lock, flags);
107 }
108
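/* evaluate link-down information: block remote ports, warn about the reported cause, and mark the adapter as ERP failed */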
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
110 struct fsf_link_down_info *link_down)
111 {
112 struct zfcp_adapter *adapter = req->adapter;
113
114 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
115 return;
116
117 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
118
119 zfcp_scsi_schedule_rports_block(adapter);
120
121 if (!link_down)
122 goto out;
123
124 switch (link_down->error_code) {
125 case FSF_PSQ_LINK_NO_LIGHT:
126 dev_warn(&req->adapter->ccw_device->dev,
127 "There is no light signal from the local "
128 "fibre channel cable\n");
129 break;
130 case FSF_PSQ_LINK_WRAP_PLUG:
131 dev_warn(&req->adapter->ccw_device->dev,
132 "There is a wrap plug instead of a fibre "
133 "channel cable\n");
134 break;
135 case FSF_PSQ_LINK_NO_FCP:
136 dev_warn(&req->adapter->ccw_device->dev,
137 "The adjacent fibre channel node does not "
138 "support FCP\n");
139 break;
140 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
141 dev_warn(&req->adapter->ccw_device->dev,
142 "The FCP device is suspended because of a "
143 "firmware update\n");
144 break;
145 case FSF_PSQ_LINK_INVALID_WWPN:
146 dev_warn(&req->adapter->ccw_device->dev,
147 "The FCP device detected a WWPN that is "
148 "duplicate or not valid\n");
149 break;
150 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
151 dev_warn(&req->adapter->ccw_device->dev,
152 "The fibre channel fabric does not support NPIV\n");
153 break;
154 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
155 dev_warn(&req->adapter->ccw_device->dev,
156 "The FCP adapter cannot support more NPIV ports\n");
157 break;
158 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
159 dev_warn(&req->adapter->ccw_device->dev,
160 "The adjacent switch cannot support "
161 "more NPIV ports\n");
162 break;
163 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
164 dev_warn(&req->adapter->ccw_device->dev,
165 "The FCP adapter could not log in to the "
166 "fibre channel fabric\n");
167 break;
168 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
169 dev_warn(&req->adapter->ccw_device->dev,
170 "The WWPN assignment file on the FCP adapter "
171 "has been damaged\n");
172 break;
173 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
174 dev_warn(&req->adapter->ccw_device->dev,
175 "The mode table on the FCP adapter "
176 "has been damaged\n");
177 break;
178 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
179 dev_warn(&req->adapter->ccw_device->dev,
180 "All NPIV ports on the FCP adapter have "
181 "been assigned\n");
182 break;
183 default:
184 dev_warn(&req->adapter->ccw_device->dev,
185 "The link between the FCP adapter and "
186 "the FC fabric is down\n");
187 }
188 out:
189 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
190 }
191
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
193 {
194 struct fsf_status_read_buffer *sr_buf = req->data;
195 struct fsf_link_down_info *ldi =
196 (struct fsf_link_down_info *) &sr_buf->payload;
197
198 switch (sr_buf->status_subtype) {
199 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
200 zfcp_fsf_link_down_info_eval(req, ldi);
201 break;
202 case FSF_STATUS_READ_SUB_FDISC_FAILED:
203 zfcp_fsf_link_down_info_eval(req, ldi);
204 break;
205 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
206 zfcp_fsf_link_down_info_eval(req, NULL);
207 }
208 }
209
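/* handle an unsolicited status buffer: dispatch on status_type, free the buffer, and schedule work to issue a replacement status read */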
210 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
211 {
212 struct zfcp_adapter *adapter = req->adapter;
213 struct fsf_status_read_buffer *sr_buf = req->data;
214
215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
216 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
218 zfcp_fsf_req_free(req);
219 return;
220 }
221
222 zfcp_dbf_hba_fsf_uss("fssrh_4", req);
223
224 switch (sr_buf->status_type) {
225 case FSF_STATUS_READ_PORT_CLOSED:
226 zfcp_fsf_status_read_port_closed(req);
227 break;
228 case FSF_STATUS_READ_INCOMING_ELS:
229 zfcp_fc_incoming_els(req);
230 break;
231 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
232 break;
233 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
234 dev_warn(&adapter->ccw_device->dev,
235 "The error threshold for checksum statistics "
236 "has been exceeded\n");
237 zfcp_dbf_hba_bit_err("fssrh_3", req);
238 break;
239 case FSF_STATUS_READ_LINK_DOWN:
240 zfcp_fsf_status_read_link_down(req);
241 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
242 break;
243 case FSF_STATUS_READ_LINK_UP:
244 dev_info(&adapter->ccw_device->dev,
245 "The local link has been restored\n");
246 /* All ports should be marked as ready to run again */
247 zfcp_erp_set_adapter_status(adapter,
248 ZFCP_STATUS_COMMON_RUNNING);
249 zfcp_erp_adapter_reopen(adapter,
250 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
251 ZFCP_STATUS_COMMON_ERP_FAILED,
252 "fssrh_2");
253 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
254
255 break;
256 case FSF_STATUS_READ_NOTIFICATION_LOST:
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
258 zfcp_fc_conditional_port_scan(adapter);
259 break;
260 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
261 adapter->adapter_features = sr_buf->payload.word[0];
262 break;
263 }
264
265 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
266 zfcp_fsf_req_free(req);
267
268 atomic_inc(&adapter->stat_miss);
269 queue_work(adapter->work_queue, &adapter->stat_work);
270 }
271
272 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
273 {
274 switch (req->qtcb->header.fsf_status_qual.word[0]) {
275 case FSF_SQ_FCP_RSP_AVAILABLE:
276 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
277 case FSF_SQ_NO_RETRY_POSSIBLE:
278 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
279 return;
280 case FSF_SQ_COMMAND_ABORTED:
281 break;
282 case FSF_SQ_NO_RECOM:
283 dev_err(&req->adapter->ccw_device->dev,
284 "The FCP adapter reported a problem "
285 "that cannot be recovered\n");
286 zfcp_qdio_siosl(req->adapter);
287 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
288 break;
289 }
290 	/* all non-return statuses set FSFREQ_ERROR */
291 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
292 }
293
294 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
295 {
296 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
297 return;
298
299 switch (req->qtcb->header.fsf_status) {
300 case FSF_UNKNOWN_COMMAND:
301 dev_err(&req->adapter->ccw_device->dev,
302 "The FCP adapter does not recognize the command 0x%x\n",
303 req->qtcb->header.fsf_command);
304 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
305 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
306 break;
307 case FSF_ADAPTER_STATUS_AVAILABLE:
308 zfcp_fsf_fsfstatus_qual_eval(req);
309 break;
310 }
311 }
312
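/* evaluate the protocol status from the QTCB prefix; most error cases trigger adapter recovery or shutdown and mark the request as failed */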
313 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
314 {
315 struct zfcp_adapter *adapter = req->adapter;
316 struct fsf_qtcb *qtcb = req->qtcb;
317 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
318
319 zfcp_dbf_hba_fsf_response(req);
320
321 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
322 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
323 return;
324 }
325
326 switch (qtcb->prefix.prot_status) {
327 case FSF_PROT_GOOD:
328 case FSF_PROT_FSF_STATUS_PRESENTED:
329 return;
330 case FSF_PROT_QTCB_VERSION_ERROR:
331 dev_err(&adapter->ccw_device->dev,
332 "QTCB version 0x%x not supported by FCP adapter "
333 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
334 psq->word[0], psq->word[1]);
335 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
336 break;
337 case FSF_PROT_ERROR_STATE:
338 case FSF_PROT_SEQ_NUMB_ERROR:
339 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
340 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
341 break;
342 case FSF_PROT_UNSUPP_QTCB_TYPE:
343 dev_err(&adapter->ccw_device->dev,
344 "The QTCB type is not supported by the FCP adapter\n");
345 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
346 break;
347 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
348 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
349 &adapter->status);
350 break;
351 case FSF_PROT_DUPLICATE_REQUEST_ID:
352 dev_err(&adapter->ccw_device->dev,
353 "0x%Lx is an ambiguous request identifier\n",
354 (unsigned long long)qtcb->bottom.support.req_handle);
355 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
356 break;
357 case FSF_PROT_LINK_DOWN:
358 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
359 /* go through reopen to flush pending requests */
360 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
361 break;
362 case FSF_PROT_REEST_QUEUE:
363 /* All ports should be marked as ready to run again */
364 zfcp_erp_set_adapter_status(adapter,
365 ZFCP_STATUS_COMMON_RUNNING);
366 zfcp_erp_adapter_reopen(adapter,
367 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
368 ZFCP_STATUS_COMMON_ERP_FAILED,
369 "fspse_8");
370 break;
371 default:
372 dev_err(&adapter->ccw_device->dev,
373 "0x%x is not a valid transfer protocol status\n",
374 qtcb->prefix.prot_status);
375 zfcp_qdio_siosl(adapter);
376 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
377 }
378 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
379 }
380
381 /**
382  * zfcp_fsf_req_complete - process completion of an FSF request
383  * @req: The FSF request that has been completed.
384 *
385 * When a request has been completed either from the FCP adapter,
386 * or it has been dismissed due to a queue shutdown, this function
387 * is called to process the completion status and trigger further
388 * events related to the FSF request.
389 */
390 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
391 {
392 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
393 zfcp_fsf_status_read_handler(req);
394 return;
395 }
396
397 del_timer(&req->timer);
398 zfcp_fsf_protstatus_eval(req);
399 zfcp_fsf_fsfstatus_eval(req);
400 req->handler(req);
401
402 if (req->erp_action)
403 zfcp_erp_notify(req->erp_action, 0);
404
405 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
406 zfcp_fsf_req_free(req);
407 else
408 complete(&req->completion);
409 }
410
411 /**
412 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
413 * @adapter: pointer to struct zfcp_adapter
414 *
415 * Never ever call this without shutting down the adapter first.
416 * Otherwise the adapter would continue using and corrupting s390 storage.
417 * Included BUG_ON() call to ensure this is done.
418 * ERP is supposed to be the only user of this function.
419 */
420 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
421 {
422 struct zfcp_fsf_req *req, *tmp;
423 LIST_HEAD(remove_queue);
424
425 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
426 zfcp_reqlist_move(adapter->req_list, &remove_queue);
427
428 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
429 list_del(&req->list);
430 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
431 zfcp_fsf_req_complete(req);
432 }
433 }
434
435 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
436 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
437 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
438 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
439 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
440 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
441 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
442
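/* translate the FSF port speed bits into the FC transport FC_PORTSPEED_* bits */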
443 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
444 {
445 u32 fdmi_speed = 0;
446 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
447 fdmi_speed |= FC_PORTSPEED_1GBIT;
448 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
449 fdmi_speed |= FC_PORTSPEED_2GBIT;
450 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
451 fdmi_speed |= FC_PORTSPEED_4GBIT;
452 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
453 fdmi_speed |= FC_PORTSPEED_10GBIT;
454 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
455 fdmi_speed |= FC_PORTSPEED_8GBIT;
456 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
457 fdmi_speed |= FC_PORTSPEED_16GBIT;
458 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
459 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
460 return fdmi_speed;
461 }
462
463 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
464 {
465 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
466 struct zfcp_adapter *adapter = req->adapter;
467 struct Scsi_Host *shost = adapter->scsi_host;
468 struct fc_els_flogi *nsp, *plogi;
469
470 /* adjust pointers for missing command code */
471 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
472 - sizeof(u32));
473 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
474 - sizeof(u32));
475
476 if (req->data)
477 memcpy(req->data, bottom, sizeof(*bottom));
478
479 fc_host_port_name(shost) = nsp->fl_wwpn;
480 fc_host_node_name(shost) = nsp->fl_wwnn;
481 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
482
483 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
484 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
485 (u16)FSF_STATUS_READS_RECOM);
486
487 if (fc_host_permanent_port_name(shost) == -1)
488 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
489
490 zfcp_scsi_set_prot(adapter);
491
492 /* no error return above here, otherwise must fix call chains */
493 /* do not evaluate invalid fields */
494 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
495 return 0;
496
497 fc_host_port_id(shost) = ntoh24(bottom->s_id);
498 fc_host_speed(shost) =
499 zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
500
501 adapter->hydra_version = bottom->adapter_type;
502
503 switch (bottom->fc_topology) {
504 case FSF_TOPO_P2P:
505 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
506 adapter->peer_wwpn = plogi->fl_wwpn;
507 adapter->peer_wwnn = plogi->fl_wwnn;
508 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
509 break;
510 case FSF_TOPO_FABRIC:
511 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
512 break;
513 case FSF_TOPO_AL:
514 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
515 /* fall through */
516 default:
517 dev_err(&adapter->ccw_device->dev,
518 "Unknown or unsupported arbitrated loop "
519 "fibre channel topology detected\n");
520 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
521 return -EIO;
522 }
523
524 return 0;
525 }
526
527 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
528 {
529 struct zfcp_adapter *adapter = req->adapter;
530 struct fsf_qtcb *qtcb = req->qtcb;
531 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
532 struct Scsi_Host *shost = adapter->scsi_host;
533
534 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
535 return;
536
537 adapter->fsf_lic_version = bottom->lic_version;
538 adapter->adapter_features = bottom->adapter_features;
539 adapter->connection_features = bottom->connection_features;
540 adapter->peer_wwpn = 0;
541 adapter->peer_wwnn = 0;
542 adapter->peer_d_id = 0;
543
544 switch (qtcb->header.fsf_status) {
545 case FSF_GOOD:
546 if (zfcp_fsf_exchange_config_evaluate(req))
547 return;
548
549 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
550 dev_err(&adapter->ccw_device->dev,
551 "FCP adapter maximum QTCB size (%d bytes) "
552 "is too small\n",
553 bottom->max_qtcb_size);
554 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
555 return;
556 }
557 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
558 &adapter->status);
559 break;
560 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
561 fc_host_node_name(shost) = 0;
562 fc_host_port_name(shost) = 0;
563 fc_host_port_id(shost) = 0;
564 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
565 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
566 adapter->hydra_version = 0;
567
568 		/* avoid adapter shutdown so that events such as
569 		 * LINK UP can still be recognized */
570 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
571 &adapter->status);
572 zfcp_fsf_link_down_info_eval(req,
573 &qtcb->header.fsf_status_qual.link_down_info);
574 if (zfcp_fsf_exchange_config_evaluate(req))
575 return;
576 break;
577 default:
578 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
579 return;
580 }
581
582 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
583 adapter->hardware_version = bottom->hardware_version;
584 memcpy(fc_host_serial_number(shost), bottom->serial_number,
585 min(FC_SERIAL_NUMBER_SIZE, 17));
586 EBCASC(fc_host_serial_number(shost),
587 min(FC_SERIAL_NUMBER_SIZE, 17));
588 }
589
590 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
591 dev_err(&adapter->ccw_device->dev,
592 "The FCP adapter only supports newer "
593 "control block versions\n");
594 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
595 return;
596 }
597 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
598 dev_err(&adapter->ccw_device->dev,
599 "The FCP adapter only supports older "
600 "control block versions\n");
601 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
602 }
603 }
604
605 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
606 {
607 struct zfcp_adapter *adapter = req->adapter;
608 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
609 struct Scsi_Host *shost = adapter->scsi_host;
610
611 if (req->data)
612 memcpy(req->data, bottom, sizeof(*bottom));
613
614 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
615 fc_host_permanent_port_name(shost) = bottom->wwpn;
616 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
617 } else
618 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
619 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
620 fc_host_supported_speeds(shost) =
621 zfcp_fsf_convert_portspeed(bottom->supported_speed);
622 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
623 FC_FC4_LIST_SIZE);
624 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
625 FC_FC4_LIST_SIZE);
626 }
627
628 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
629 {
630 struct fsf_qtcb *qtcb = req->qtcb;
631
632 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
633 return;
634
635 switch (qtcb->header.fsf_status) {
636 case FSF_GOOD:
637 zfcp_fsf_exchange_port_evaluate(req);
638 break;
639 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
640 zfcp_fsf_exchange_port_evaluate(req);
641 zfcp_fsf_link_down_info_eval(req,
642 &qtcb->header.fsf_status_qual.link_down_info);
643 break;
644 }
645 }
646
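/* allocate a zeroed struct zfcp_fsf_req, from the mempool if one is given, otherwise via kmalloc (GFP_ATOMIC in both cases) */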
647 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
648 {
649 struct zfcp_fsf_req *req;
650
651 if (likely(pool))
652 req = mempool_alloc(pool, GFP_ATOMIC);
653 else
654 req = kmalloc(sizeof(*req), GFP_ATOMIC);
655
656 if (unlikely(!req))
657 return NULL;
658
659 memset(req, 0, sizeof(*req));
660 req->pool = pool;
661 return req;
662 }
663
664 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
665 {
666 struct fsf_qtcb *qtcb;
667
668 if (likely(pool))
669 qtcb = mempool_alloc(pool, GFP_ATOMIC);
670 else
671 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
672
673 if (unlikely(!qtcb))
674 return NULL;
675
676 memset(qtcb, 0, sizeof(*qtcb));
677 return qtcb;
678 }
679
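/* allocate and initialize an FSF request: timer, request id, and (except for unsolicited status reads) a QTCB with prefix and header filled in, then initialize the QDIO request */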
680 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
681 u32 fsf_cmd, u8 sbtype,
682 mempool_t *pool)
683 {
684 struct zfcp_adapter *adapter = qdio->adapter;
685 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
686
687 if (unlikely(!req))
688 return ERR_PTR(-ENOMEM);
689
690 if (adapter->req_no == 0)
691 adapter->req_no++;
692
693 INIT_LIST_HEAD(&req->list);
694 init_timer(&req->timer);
695 init_completion(&req->completion);
696
697 req->adapter = adapter;
698 req->fsf_command = fsf_cmd;
699 req->req_id = adapter->req_no;
700
701 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
702 if (likely(pool))
703 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
704 else
705 req->qtcb = zfcp_qtcb_alloc(NULL);
706
707 if (unlikely(!req->qtcb)) {
708 zfcp_fsf_req_free(req);
709 return ERR_PTR(-ENOMEM);
710 }
711
712 req->seq_no = adapter->fsf_req_seq_no;
713 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
714 req->qtcb->prefix.req_id = req->req_id;
715 req->qtcb->prefix.ulp_info = 26;
716 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
717 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
718 req->qtcb->header.req_handle = req->req_id;
719 req->qtcb->header.fsf_command = req->fsf_command;
720 }
721
722 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
723 req->qtcb, sizeof(struct fsf_qtcb));
724
725 return req;
726 }
727
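/* register the request in the adapter's request list and hand it to QDIO; on send failure remove it again and reopen the adapter */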
728 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
729 {
730 struct zfcp_adapter *adapter = req->adapter;
731 struct zfcp_qdio *qdio = adapter->qdio;
732 int with_qtcb = (req->qtcb != NULL);
733 int req_id = req->req_id;
734
735 zfcp_reqlist_add(adapter->req_list, req);
736
737 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
738 req->issued = get_tod_clock();
739 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
740 del_timer(&req->timer);
741 /* lookup request again, list might have changed */
742 zfcp_reqlist_find_rm(adapter->req_list, req_id);
743 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
744 return -EIO;
745 }
746
747 /* Don't increase for unsolicited status */
748 if (with_qtcb)
749 adapter->fsf_req_seq_no++;
750 adapter->req_no++;
751
752 return 0;
753 }
754
755 /**
756 * zfcp_fsf_status_read - send status read request
757  * @qdio: pointer to struct zfcp_qdio
758  *
759  * Returns: 0 on success, error otherwise
760 */
761 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
762 {
763 struct zfcp_adapter *adapter = qdio->adapter;
764 struct zfcp_fsf_req *req;
765 struct fsf_status_read_buffer *sr_buf;
766 struct page *page;
767 int retval = -EIO;
768
769 spin_lock_irq(&qdio->req_q_lock);
770 if (zfcp_qdio_sbal_get(qdio))
771 goto out;
772
773 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
774 SBAL_SFLAGS0_TYPE_STATUS,
775 adapter->pool.status_read_req);
776 if (IS_ERR(req)) {
777 retval = PTR_ERR(req);
778 goto out;
779 }
780
781 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
782 if (!page) {
783 retval = -ENOMEM;
784 goto failed_buf;
785 }
786 sr_buf = page_address(page);
787 memset(sr_buf, 0, sizeof(*sr_buf));
788 req->data = sr_buf;
789
790 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
791 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
792
793 retval = zfcp_fsf_req_send(req);
794 if (retval)
795 goto failed_req_send;
796
797 goto out;
798
799 failed_req_send:
800 req->data = NULL;
801 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
802 failed_buf:
803 zfcp_dbf_hba_fsf_uss("fssr__1", req);
804 zfcp_fsf_req_free(req);
805 out:
806 spin_unlock_irq(&qdio->req_q_lock);
807 return retval;
808 }
809
810 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
811 {
812 struct scsi_device *sdev = req->data;
813 struct zfcp_scsi_dev *zfcp_sdev;
814 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
815
816 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
817 return;
818
819 zfcp_sdev = sdev_to_zfcp(sdev);
820
821 switch (req->qtcb->header.fsf_status) {
822 case FSF_PORT_HANDLE_NOT_VALID:
823 if (fsq->word[0] == fsq->word[1]) {
824 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
825 "fsafch1");
826 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
827 }
828 break;
829 case FSF_LUN_HANDLE_NOT_VALID:
830 if (fsq->word[0] == fsq->word[1]) {
831 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
832 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
833 }
834 break;
835 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
836 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
837 break;
838 case FSF_PORT_BOXED:
839 zfcp_erp_set_port_status(zfcp_sdev->port,
840 ZFCP_STATUS_COMMON_ACCESS_BOXED);
841 zfcp_erp_port_reopen(zfcp_sdev->port,
842 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
843 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
844 break;
845 case FSF_LUN_BOXED:
846 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
847 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
848 "fsafch4");
849 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
850 break;
851 case FSF_ADAPTER_STATUS_AVAILABLE:
852 switch (fsq->word[0]) {
853 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
854 zfcp_fc_test_link(zfcp_sdev->port);
855 /* fall through */
856 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
857 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
858 break;
859 }
860 break;
861 case FSF_GOOD:
862 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
863 break;
864 }
865 }
866
867 /**
868 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
869 * @scmnd: The SCSI command to abort
870  * Returns: pointer to struct zfcp_fsf_req or NULL on failure
871 */
872
873 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
874 {
875 struct zfcp_fsf_req *req = NULL;
876 struct scsi_device *sdev = scmnd->device;
877 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
878 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
879 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
880
881 spin_lock_irq(&qdio->req_q_lock);
882 if (zfcp_qdio_sbal_get(qdio))
883 goto out;
884 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
885 SBAL_SFLAGS0_TYPE_READ,
886 qdio->adapter->pool.scsi_abort);
887 if (IS_ERR(req)) {
888 req = NULL;
889 goto out;
890 }
891
892 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
893 ZFCP_STATUS_COMMON_UNBLOCKED)))
894 goto out_error_free;
895
896 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
897
898 req->data = sdev;
899 req->handler = zfcp_fsf_abort_fcp_command_handler;
900 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
901 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
902 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
903
904 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
905 if (!zfcp_fsf_req_send(req))
906 goto out;
907
908 out_error_free:
909 zfcp_fsf_req_free(req);
910 req = NULL;
911 out:
912 spin_unlock_irq(&qdio->req_q_lock);
913 return req;
914 }
915
916 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
917 {
918 struct zfcp_adapter *adapter = req->adapter;
919 struct zfcp_fsf_ct_els *ct = req->data;
920 struct fsf_qtcb_header *header = &req->qtcb->header;
921
922 ct->status = -EINVAL;
923
924 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
925 goto skip_fsfstatus;
926
927 switch (header->fsf_status) {
928 case FSF_GOOD:
929 zfcp_dbf_san_res("fsscth2", req);
930 ct->status = 0;
931 break;
932 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
933 zfcp_fsf_class_not_supp(req);
934 break;
935 case FSF_ADAPTER_STATUS_AVAILABLE:
936 switch (header->fsf_status_qual.word[0]){
937 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
938 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
939 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
940 break;
941 }
942 break;
943 case FSF_PORT_BOXED:
944 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
945 break;
946 case FSF_PORT_HANDLE_NOT_VALID:
947 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
948 /* fall through */
949 case FSF_GENERIC_COMMAND_REJECTED:
950 case FSF_PAYLOAD_SIZE_MISMATCH:
951 case FSF_REQUEST_SIZE_TOO_LARGE:
952 case FSF_RESPONSE_SIZE_TOO_LARGE:
953 case FSF_SBAL_MISMATCH:
954 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
955 break;
956 }
957
958 skip_fsfstatus:
959 if (ct->handler)
960 ct->handler(ct->handler_data);
961 }
962
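/* place the CT/ELS request and response buffers into a single, unchained SBAL */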
963 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
964 struct zfcp_qdio_req *q_req,
965 struct scatterlist *sg_req,
966 struct scatterlist *sg_resp)
967 {
968 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
969 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
970 zfcp_qdio_set_sbale_last(qdio, q_req);
971 }
972
973 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
974 struct scatterlist *sg_req,
975 struct scatterlist *sg_resp)
976 {
977 struct zfcp_adapter *adapter = req->adapter;
978 struct zfcp_qdio *qdio = adapter->qdio;
979 struct fsf_qtcb *qtcb = req->qtcb;
980 u32 feat = adapter->adapter_features;
981
982 if (zfcp_adapter_multi_buffer_active(adapter)) {
983 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
984 return -EIO;
985 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
986 return -EIO;
987
988 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
989 zfcp_qdio_sbale_count(sg_req));
990 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
991 zfcp_qdio_set_scount(qdio, &req->qdio_req);
992 return 0;
993 }
994
995 /* use single, unchained SBAL if it can hold the request */
996 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
997 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
998 sg_req, sg_resp);
999 return 0;
1000 }
1001
1002 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1003 return -EOPNOTSUPP;
1004
1005 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1006 return -EIO;
1007
1008 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1009
1010 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1011 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1012
1013 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1014 return -EIO;
1015
1016 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1017
1018 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1019
1020 return 0;
1021 }
1022
1023 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1024 struct scatterlist *sg_req,
1025 struct scatterlist *sg_resp,
1026 unsigned int timeout)
1027 {
1028 int ret;
1029
1030 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1031 if (ret)
1032 return ret;
1033
1034 /* common settings for ct/gs and els requests */
1035 if (timeout > 255)
1036 timeout = 255; /* max value accepted by hardware */
1037 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1038 req->qtcb->bottom.support.timeout = timeout;
1039 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1040
1041 return 0;
1042 }
1043
1044 /**
1045 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1046  * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1047 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1048 */
1049 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1050 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1051 unsigned int timeout)
1052 {
1053 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1054 struct zfcp_fsf_req *req;
1055 int ret = -EIO;
1056
1057 spin_lock_irq(&qdio->req_q_lock);
1058 if (zfcp_qdio_sbal_get(qdio))
1059 goto out;
1060
1061 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1062 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1063
1064 if (IS_ERR(req)) {
1065 ret = PTR_ERR(req);
1066 goto out;
1067 }
1068
1069 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1070 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1071 if (ret)
1072 goto failed_send;
1073
1074 req->handler = zfcp_fsf_send_ct_handler;
1075 req->qtcb->header.port_handle = wka_port->handle;
1076 req->data = ct;
1077
1078 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1079
1080 ret = zfcp_fsf_req_send(req);
1081 if (ret)
1082 goto failed_send;
1083
1084 goto out;
1085
1086 failed_send:
1087 zfcp_fsf_req_free(req);
1088 out:
1089 spin_unlock_irq(&qdio->req_q_lock);
1090 return ret;
1091 }
1092
1093 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1094 {
1095 struct zfcp_fsf_ct_els *send_els = req->data;
1096 struct fsf_qtcb_header *header = &req->qtcb->header;
1097
1098 send_els->status = -EINVAL;
1099
1100 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1101 goto skip_fsfstatus;
1102
1103 switch (header->fsf_status) {
1104 case FSF_GOOD:
1105 zfcp_dbf_san_res("fsselh1", req);
1106 send_els->status = 0;
1107 break;
1108 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1109 zfcp_fsf_class_not_supp(req);
1110 break;
1111 case FSF_ADAPTER_STATUS_AVAILABLE:
1112 switch (header->fsf_status_qual.word[0]){
1113 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1114 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1115 case FSF_SQ_RETRY_IF_POSSIBLE:
1116 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1117 break;
1118 }
1119 break;
1120 case FSF_ELS_COMMAND_REJECTED:
1121 case FSF_PAYLOAD_SIZE_MISMATCH:
1122 case FSF_REQUEST_SIZE_TOO_LARGE:
1123 case FSF_RESPONSE_SIZE_TOO_LARGE:
1124 break;
1125 case FSF_SBAL_MISMATCH:
1126 /* should never occur, avoided in zfcp_fsf_send_els */
1127 /* fall through */
1128 default:
1129 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1130 break;
1131 }
1132 skip_fsfstatus:
1133 if (send_els->handler)
1134 send_els->handler(send_els->handler_data);
1135 }
1136
1137 /**
1138 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1139  * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1140 */
1141 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1142 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1143 {
1144 struct zfcp_fsf_req *req;
1145 struct zfcp_qdio *qdio = adapter->qdio;
1146 int ret = -EIO;
1147
1148 spin_lock_irq(&qdio->req_q_lock);
1149 if (zfcp_qdio_sbal_get(qdio))
1150 goto out;
1151
1152 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1153 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1154
1155 if (IS_ERR(req)) {
1156 ret = PTR_ERR(req);
1157 goto out;
1158 }
1159
1160 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1161
1162 if (!zfcp_adapter_multi_buffer_active(adapter))
1163 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1164
1165 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1166
1167 if (ret)
1168 goto failed_send;
1169
1170 hton24(req->qtcb->bottom.support.d_id, d_id);
1171 req->handler = zfcp_fsf_send_els_handler;
1172 req->data = els;
1173
1174 zfcp_dbf_san_req("fssels1", req, d_id);
1175
1176 ret = zfcp_fsf_req_send(req);
1177 if (ret)
1178 goto failed_send;
1179
1180 goto out;
1181
1182 failed_send:
1183 zfcp_fsf_req_free(req);
1184 out:
1185 spin_unlock_irq(&qdio->req_q_lock);
1186 return ret;
1187 }
1188
1189 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1190 {
1191 struct zfcp_fsf_req *req;
1192 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1193 int retval = -EIO;
1194
1195 spin_lock_irq(&qdio->req_q_lock);
1196 if (zfcp_qdio_sbal_get(qdio))
1197 goto out;
1198
1199 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1200 SBAL_SFLAGS0_TYPE_READ,
1201 qdio->adapter->pool.erp_req);
1202
1203 if (IS_ERR(req)) {
1204 retval = PTR_ERR(req);
1205 goto out;
1206 }
1207
1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1209 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1210
1211 req->qtcb->bottom.config.feature_selection =
1212 FSF_FEATURE_NOTIFICATION_LOST |
1213 FSF_FEATURE_UPDATE_ALERT;
1214 req->erp_action = erp_action;
1215 req->handler = zfcp_fsf_exchange_config_data_handler;
1216 erp_action->fsf_req_id = req->req_id;
1217
1218 zfcp_fsf_start_erp_timer(req);
1219 retval = zfcp_fsf_req_send(req);
1220 if (retval) {
1221 zfcp_fsf_req_free(req);
1222 erp_action->fsf_req_id = 0;
1223 }
1224 out:
1225 spin_unlock_irq(&qdio->req_q_lock);
1226 return retval;
1227 }
1228
1229 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1230 struct fsf_qtcb_bottom_config *data)
1231 {
1232 struct zfcp_fsf_req *req = NULL;
1233 int retval = -EIO;
1234
1235 spin_lock_irq(&qdio->req_q_lock);
1236 if (zfcp_qdio_sbal_get(qdio))
1237 goto out_unlock;
1238
1239 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1240 SBAL_SFLAGS0_TYPE_READ, NULL);
1241
1242 if (IS_ERR(req)) {
1243 retval = PTR_ERR(req);
1244 goto out_unlock;
1245 }
1246
1247 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1248 req->handler = zfcp_fsf_exchange_config_data_handler;
1249
1250 req->qtcb->bottom.config.feature_selection =
1251 FSF_FEATURE_NOTIFICATION_LOST |
1252 FSF_FEATURE_UPDATE_ALERT;
1253
1254 if (data)
1255 req->data = data;
1256
1257 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1258 retval = zfcp_fsf_req_send(req);
1259 spin_unlock_irq(&qdio->req_q_lock);
1260 if (!retval)
1261 wait_for_completion(&req->completion);
1262
1263 zfcp_fsf_req_free(req);
1264 return retval;
1265
1266 out_unlock:
1267 spin_unlock_irq(&qdio->req_q_lock);
1268 return retval;
1269 }
1270
1271 /**
1272 * zfcp_fsf_exchange_port_data - request information about local port
1273 * @erp_action: ERP action for the adapter for which port data is requested
1274 * Returns: 0 on success, error otherwise
1275 */
1276 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1277 {
1278 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1279 struct zfcp_fsf_req *req;
1280 int retval = -EIO;
1281
1282 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1283 return -EOPNOTSUPP;
1284
1285 spin_lock_irq(&qdio->req_q_lock);
1286 if (zfcp_qdio_sbal_get(qdio))
1287 goto out;
1288
1289 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1290 SBAL_SFLAGS0_TYPE_READ,
1291 qdio->adapter->pool.erp_req);
1292
1293 if (IS_ERR(req)) {
1294 retval = PTR_ERR(req);
1295 goto out;
1296 }
1297
1298 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1299 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1300
1301 req->handler = zfcp_fsf_exchange_port_data_handler;
1302 req->erp_action = erp_action;
1303 erp_action->fsf_req_id = req->req_id;
1304
1305 zfcp_fsf_start_erp_timer(req);
1306 retval = zfcp_fsf_req_send(req);
1307 if (retval) {
1308 zfcp_fsf_req_free(req);
1309 erp_action->fsf_req_id = 0;
1310 }
1311 out:
1312 spin_unlock_irq(&qdio->req_q_lock);
1313 return retval;
1314 }
1315
1316 /**
1317 * zfcp_fsf_exchange_port_data_sync - request information about local port
1318 * @qdio: pointer to struct zfcp_qdio
1319 * @data: pointer to struct fsf_qtcb_bottom_port
1320 * Returns: 0 on success, error otherwise
1321 */
1322 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1323 struct fsf_qtcb_bottom_port *data)
1324 {
1325 struct zfcp_fsf_req *req = NULL;
1326 int retval = -EIO;
1327
1328 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1329 return -EOPNOTSUPP;
1330
1331 spin_lock_irq(&qdio->req_q_lock);
1332 if (zfcp_qdio_sbal_get(qdio))
1333 goto out_unlock;
1334
1335 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1336 SBAL_SFLAGS0_TYPE_READ, NULL);
1337
1338 if (IS_ERR(req)) {
1339 retval = PTR_ERR(req);
1340 goto out_unlock;
1341 }
1342
1343 if (data)
1344 req->data = data;
1345
1346 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1347
1348 req->handler = zfcp_fsf_exchange_port_data_handler;
1349 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1350 retval = zfcp_fsf_req_send(req);
1351 spin_unlock_irq(&qdio->req_q_lock);
1352
1353 if (!retval)
1354 wait_for_completion(&req->completion);
1355
1356 zfcp_fsf_req_free(req);
1357
1358 return retval;
1359
1360 out_unlock:
1361 spin_unlock_irq(&qdio->req_q_lock);
1362 return retval;
1363 }
1364
1365 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1366 {
1367 struct zfcp_port *port = req->data;
1368 struct fsf_qtcb_header *header = &req->qtcb->header;
1369 struct fc_els_flogi *plogi;
1370
1371 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1372 goto out;
1373
1374 switch (header->fsf_status) {
1375 case FSF_PORT_ALREADY_OPEN:
1376 break;
1377 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1378 dev_warn(&req->adapter->ccw_device->dev,
1379 "Not enough FCP adapter resources to open "
1380 "remote port 0x%016Lx\n",
1381 (unsigned long long)port->wwpn);
1382 zfcp_erp_set_port_status(port,
1383 ZFCP_STATUS_COMMON_ERP_FAILED);
1384 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1385 break;
1386 case FSF_ADAPTER_STATUS_AVAILABLE:
1387 switch (header->fsf_status_qual.word[0]) {
1388 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1389 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1390 case FSF_SQ_NO_RETRY_POSSIBLE:
1391 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1392 break;
1393 }
1394 break;
1395 case FSF_GOOD:
1396 port->handle = header->port_handle;
1397 atomic_or(ZFCP_STATUS_COMMON_OPEN |
1398 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1399 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1400 &port->status);
1401 /* check whether D_ID has changed during open */
1402 /*
1403 * FIXME: This check is not airtight, as the FCP channel does
1404 * not monitor closures of target port connections caused on
1405 * the remote side. Thus, they might miss out on invalidating
1406 * locally cached WWPNs (and other N_Port parameters) of gone
1407 * target ports. So, our heroic attempt to make things safe
1408 * could be undermined by 'open port' response data tagged with
1409 * obsolete WWPNs. Another reason to monitor potential
1410 		 * connection closures ourselves at least (by interpreting
1411 * incoming ELS' and unsolicited status). It just crosses my
1412 * mind that one should be able to cross-check by means of
1413 * another GID_PN straight after a port has been opened.
1414 		 * Alternatively, an ADISC/PDISC ELS should suffice, as well.
1415 */
1416 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1417 if (req->qtcb->bottom.support.els1_length >=
1418 FSF_PLOGI_MIN_LEN)
1419 zfcp_fc_plogi_evaluate(port, plogi);
1420 break;
1421 case FSF_UNKNOWN_OP_SUBTYPE:
1422 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1423 break;
1424 }
1425
1426 out:
1427 put_device(&port->dev);
1428 }
1429
1430 /**
1431 * zfcp_fsf_open_port - create and send open port request
1432 * @erp_action: pointer to struct zfcp_erp_action
1433 * Returns: 0 on success, error otherwise
1434 */
1435 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1436 {
1437 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1438 struct zfcp_port *port = erp_action->port;
1439 struct zfcp_fsf_req *req;
1440 int retval = -EIO;
1441
1442 spin_lock_irq(&qdio->req_q_lock);
1443 if (zfcp_qdio_sbal_get(qdio))
1444 goto out;
1445
1446 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1447 SBAL_SFLAGS0_TYPE_READ,
1448 qdio->adapter->pool.erp_req);
1449
1450 if (IS_ERR(req)) {
1451 retval = PTR_ERR(req);
1452 goto out;
1453 }
1454
1455 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1456 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1457
1458 req->handler = zfcp_fsf_open_port_handler;
1459 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1460 req->data = port;
1461 req->erp_action = erp_action;
1462 erp_action->fsf_req_id = req->req_id;
1463 get_device(&port->dev);
1464
1465 zfcp_fsf_start_erp_timer(req);
1466 retval = zfcp_fsf_req_send(req);
1467 if (retval) {
1468 zfcp_fsf_req_free(req);
1469 erp_action->fsf_req_id = 0;
1470 put_device(&port->dev);
1471 }
1472 out:
1473 spin_unlock_irq(&qdio->req_q_lock);
1474 return retval;
1475 }
1476
1477 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1478 {
1479 struct zfcp_port *port = req->data;
1480
1481 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1482 return;
1483
1484 switch (req->qtcb->header.fsf_status) {
1485 case FSF_PORT_HANDLE_NOT_VALID:
1486 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1487 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1488 break;
1489 case FSF_ADAPTER_STATUS_AVAILABLE:
1490 break;
1491 case FSF_GOOD:
1492 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1493 break;
1494 }
1495 }
1496
1497 /**
1498 * zfcp_fsf_close_port - create and send close port request
1499 * @erp_action: pointer to struct zfcp_erp_action
1500 * Returns: 0 on success, error otherwise
1501 */
1502 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1503 {
1504 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1505 struct zfcp_fsf_req *req;
1506 int retval = -EIO;
1507
1508 spin_lock_irq(&qdio->req_q_lock);
1509 if (zfcp_qdio_sbal_get(qdio))
1510 goto out;
1511
1512 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1513 SBAL_SFLAGS0_TYPE_READ,
1514 qdio->adapter->pool.erp_req);
1515
1516 if (IS_ERR(req)) {
1517 retval = PTR_ERR(req);
1518 goto out;
1519 }
1520
1521 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1522 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1523
1524 req->handler = zfcp_fsf_close_port_handler;
1525 req->data = erp_action->port;
1526 req->erp_action = erp_action;
1527 req->qtcb->header.port_handle = erp_action->port->handle;
1528 erp_action->fsf_req_id = req->req_id;
1529
1530 zfcp_fsf_start_erp_timer(req);
1531 retval = zfcp_fsf_req_send(req);
1532 if (retval) {
1533 zfcp_fsf_req_free(req);
1534 erp_action->fsf_req_id = 0;
1535 }
1536 out:
1537 spin_unlock_irq(&qdio->req_q_lock);
1538 return retval;
1539 }
1540
1541 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1542 {
1543 struct zfcp_fc_wka_port *wka_port = req->data;
1544 struct fsf_qtcb_header *header = &req->qtcb->header;
1545
1546 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1547 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1548 goto out;
1549 }
1550
1551 switch (header->fsf_status) {
1552 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1553 dev_warn(&req->adapter->ccw_device->dev,
1554 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1555 /* fall through */
1556 case FSF_ADAPTER_STATUS_AVAILABLE:
1557 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1558 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1559 break;
1560 case FSF_GOOD:
1561 wka_port->handle = header->port_handle;
1562 /* fall through */
1563 case FSF_PORT_ALREADY_OPEN:
1564 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1565 }
1566 out:
1567 wake_up(&wka_port->completion_wq);
1568 }
1569
1570 /**
1571 * zfcp_fsf_open_wka_port - create and send open wka-port request
1572 * @wka_port: pointer to struct zfcp_fc_wka_port
1573 * Returns: 0 on success, error otherwise
1574 */
1575 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1576 {
1577 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1578 struct zfcp_fsf_req *req;
1579 int retval = -EIO;
1580
1581 spin_lock_irq(&qdio->req_q_lock);
1582 if (zfcp_qdio_sbal_get(qdio))
1583 goto out;
1584
1585 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1586 SBAL_SFLAGS0_TYPE_READ,
1587 qdio->adapter->pool.erp_req);
1588
1589 if (IS_ERR(req)) {
1590 retval = PTR_ERR(req);
1591 goto out;
1592 }
1593
1594 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1595 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1596
1597 req->handler = zfcp_fsf_open_wka_port_handler;
1598 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1599 req->data = wka_port;
1600
1601 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1602 retval = zfcp_fsf_req_send(req);
1603 if (retval)
1604 zfcp_fsf_req_free(req);
1605 out:
1606 spin_unlock_irq(&qdio->req_q_lock);
1607 return retval;
1608 }
1609
1610 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1611 {
1612 struct zfcp_fc_wka_port *wka_port = req->data;
1613
1614 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1615 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1616 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1617 }
1618
1619 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1620 wake_up(&wka_port->completion_wq);
1621 }
1622
1623 /**
1624 * zfcp_fsf_close_wka_port - create and send close wka port request
1625  * @wka_port: WKA port to close
1626 * Returns: 0 on success, error otherwise
1627 */
1628 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1629 {
1630 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1631 struct zfcp_fsf_req *req;
1632 int retval = -EIO;
1633
1634 spin_lock_irq(&qdio->req_q_lock);
1635 if (zfcp_qdio_sbal_get(qdio))
1636 goto out;
1637
1638 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1639 SBAL_SFLAGS0_TYPE_READ,
1640 qdio->adapter->pool.erp_req);
1641
1642 if (IS_ERR(req)) {
1643 retval = PTR_ERR(req);
1644 goto out;
1645 }
1646
1647 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1648 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1649
1650 req->handler = zfcp_fsf_close_wka_port_handler;
1651 req->data = wka_port;
1652 req->qtcb->header.port_handle = wka_port->handle;
1653
1654 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1655 retval = zfcp_fsf_req_send(req);
1656 if (retval)
1657 zfcp_fsf_req_free(req);
1658 out:
1659 spin_unlock_irq(&qdio->req_q_lock);
1660 return retval;
1661 }
1662
1663 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1664 {
1665 struct zfcp_port *port = req->data;
1666 struct fsf_qtcb_header *header = &req->qtcb->header;
1667 struct scsi_device *sdev;
1668
1669 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1670 return;
1671
1672 switch (header->fsf_status) {
1673 case FSF_PORT_HANDLE_NOT_VALID:
1674 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1675 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1676 break;
1677 case FSF_PORT_BOXED:
1678 /* can't use generic zfcp_erp_modify_port_status because
1679 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1680 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1681 shost_for_each_device(sdev, port->adapter->scsi_host)
1682 if (sdev_to_zfcp(sdev)->port == port)
1683 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1684 &sdev_to_zfcp(sdev)->status);
1685 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1686 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1687 "fscpph2");
1688 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1689 break;
1690 case FSF_ADAPTER_STATUS_AVAILABLE:
1691 switch (header->fsf_status_qual.word[0]) {
1692 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1693 /* fall through */
1694 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1695 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1696 break;
1697 }
1698 break;
1699 case FSF_GOOD:
1700 /* can't use generic zfcp_erp_modify_port_status because
1701 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1702 */
1703 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1704 shost_for_each_device(sdev, port->adapter->scsi_host)
1705 if (sdev_to_zfcp(sdev)->port == port)
1706 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1707 &sdev_to_zfcp(sdev)->status);
1708 break;
1709 }
1710 }
1711
1712 /**
1713 * zfcp_fsf_close_physical_port - close physical port
1714 * @erp_action: pointer to struct zfcp_erp_action
1715  * Returns: 0 on success, error otherwise
1716 */
1717 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1718 {
1719 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1720 struct zfcp_fsf_req *req;
1721 int retval = -EIO;
1722
1723 spin_lock_irq(&qdio->req_q_lock);
1724 if (zfcp_qdio_sbal_get(qdio))
1725 goto out;
1726
1727 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1728 SBAL_SFLAGS0_TYPE_READ,
1729 qdio->adapter->pool.erp_req);
1730
1731 if (IS_ERR(req)) {
1732 retval = PTR_ERR(req);
1733 goto out;
1734 }
1735
1736 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1737 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1738
1739 req->data = erp_action->port;
1740 req->qtcb->header.port_handle = erp_action->port->handle;
1741 req->erp_action = erp_action;
1742 req->handler = zfcp_fsf_close_physical_port_handler;
1743 erp_action->fsf_req_id = req->req_id;
1744
1745 zfcp_fsf_start_erp_timer(req);
1746 retval = zfcp_fsf_req_send(req);
1747 if (retval) {
1748 zfcp_fsf_req_free(req);
1749 erp_action->fsf_req_id = 0;
1750 }
1751 out:
1752 spin_unlock_irq(&qdio->req_q_lock);
1753 return retval;
1754 }
1755
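/* evaluate the response to an open-LUN request; on success store the returned LUN handle and mark the LUN open */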
1756 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1757 {
1758 struct zfcp_adapter *adapter = req->adapter;
1759 struct scsi_device *sdev = req->data;
1760 struct zfcp_scsi_dev *zfcp_sdev;
1761 struct fsf_qtcb_header *header = &req->qtcb->header;
1762 union fsf_status_qual *qual = &header->fsf_status_qual;
1763
1764 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1765 return;
1766
1767 zfcp_sdev = sdev_to_zfcp(sdev);
1768
1769 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1770 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1771 &zfcp_sdev->status);
1772
1773 switch (header->fsf_status) {
1774
1775 case FSF_PORT_HANDLE_NOT_VALID:
1776 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1777 /* fall through */
1778 case FSF_LUN_ALREADY_OPEN:
1779 break;
1780 case FSF_PORT_BOXED:
1781 zfcp_erp_set_port_status(zfcp_sdev->port,
1782 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1783 zfcp_erp_port_reopen(zfcp_sdev->port,
1784 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1785 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1786 break;
1787 case FSF_LUN_SHARING_VIOLATION:
1788 if (qual->word[0])
1789 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1790 "LUN 0x%Lx on port 0x%Lx is already in "
1791 "use by CSS%d, MIF Image ID %x\n",
1792 zfcp_scsi_dev_lun(sdev),
1793 (unsigned long long)zfcp_sdev->port->wwpn,
1794 qual->fsf_queue_designator.cssid,
1795 qual->fsf_queue_designator.hla);
1796 zfcp_erp_set_lun_status(sdev,
1797 ZFCP_STATUS_COMMON_ERP_FAILED |
1798 ZFCP_STATUS_COMMON_ACCESS_DENIED);
1799 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1800 break;
1801 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1802 dev_warn(&adapter->ccw_device->dev,
1803 "No handle is available for LUN "
1804 "0x%016Lx on port 0x%016Lx\n",
1805 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1806 (unsigned long long)zfcp_sdev->port->wwpn);
1807 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1808 /* fall through */
1809 case FSF_INVALID_COMMAND_OPTION:
1810 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1811 break;
1812 case FSF_ADAPTER_STATUS_AVAILABLE:
1813 switch (header->fsf_status_qual.word[0]) {
1814 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1815 zfcp_fc_test_link(zfcp_sdev->port);
1816 /* fall through */
1817 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1818 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1819 break;
1820 }
1821 break;
1822
1823 case FSF_GOOD:
1824 zfcp_sdev->lun_handle = header->lun_handle;
1825 atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1826 break;
1827 }
1828 }
1829
1830 /**
1831 * zfcp_fsf_open_lun - open LUN
1832 * @erp_action: pointer to struct zfcp_erp_action
1833 * Returns: 0 on success, error otherwise
1834 */
1835 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1836 {
1837 struct zfcp_adapter *adapter = erp_action->adapter;
1838 struct zfcp_qdio *qdio = adapter->qdio;
1839 struct zfcp_fsf_req *req;
1840 int retval = -EIO;
1841
1842 spin_lock_irq(&qdio->req_q_lock);
1843 if (zfcp_qdio_sbal_get(qdio))
1844 goto out;
1845
1846 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1847 SBAL_SFLAGS0_TYPE_READ,
1848 adapter->pool.erp_req);
1849
1850 if (IS_ERR(req)) {
1851 retval = PTR_ERR(req);
1852 goto out;
1853 }
1854
1855 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1856 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1857
1858 req->qtcb->header.port_handle = erp_action->port->handle;
1859 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1860 req->handler = zfcp_fsf_open_lun_handler;
1861 req->data = erp_action->sdev;
1862 req->erp_action = erp_action;
1863 erp_action->fsf_req_id = req->req_id;
1864
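	/* adapter is not running in NPIV mode: request suppression of LUN boxing */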
1865 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1866 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1867
1868 zfcp_fsf_start_erp_timer(req);
1869 retval = zfcp_fsf_req_send(req);
1870 if (retval) {
1871 zfcp_fsf_req_free(req);
1872 erp_action->fsf_req_id = 0;
1873 }
1874 out:
1875 spin_unlock_irq(&qdio->req_q_lock);
1876 return retval;
1877 }
1878
1879 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1880 {
1881 struct scsi_device *sdev = req->data;
1882 struct zfcp_scsi_dev *zfcp_sdev;
1883
1884 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1885 return;
1886
1887 zfcp_sdev = sdev_to_zfcp(sdev);
1888
1889 switch (req->qtcb->header.fsf_status) {
1890 case FSF_PORT_HANDLE_NOT_VALID:
1891 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1892 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1893 break;
1894 case FSF_LUN_HANDLE_NOT_VALID:
1895 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1896 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1897 break;
1898 case FSF_PORT_BOXED:
1899 zfcp_erp_set_port_status(zfcp_sdev->port,
1900 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1901 zfcp_erp_port_reopen(zfcp_sdev->port,
1902 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1903 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1904 break;
1905 case FSF_ADAPTER_STATUS_AVAILABLE:
1906 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1907 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1908 zfcp_fc_test_link(zfcp_sdev->port);
1909 /* fall through */
1910 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1911 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1912 break;
1913 }
1914 break;
1915 case FSF_GOOD:
1916 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1917 break;
1918 }
1919 }
1920
1921 /**
1922 * zfcp_fsf_close_lun - close LUN
1923 * @erp_action: pointer to erp_action triggering the "close LUN"
1924 * Returns: 0 on success, error otherwise
1925 */
1926 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1927 {
1928 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1929 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1930 struct zfcp_fsf_req *req;
1931 int retval = -EIO;
1932
1933 spin_lock_irq(&qdio->req_q_lock);
1934 if (zfcp_qdio_sbal_get(qdio))
1935 goto out;
1936
1937 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1938 SBAL_SFLAGS0_TYPE_READ,
1939 qdio->adapter->pool.erp_req);
1940
1941 if (IS_ERR(req)) {
1942 retval = PTR_ERR(req);
1943 goto out;
1944 }
1945
1946 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1947 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1948
1949 req->qtcb->header.port_handle = erp_action->port->handle;
1950 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1951 req->handler = zfcp_fsf_close_lun_handler;
1952 req->data = erp_action->sdev;
1953 req->erp_action = erp_action;
1954 erp_action->fsf_req_id = req->req_id;
1955
1956 zfcp_fsf_start_erp_timer(req);
1957 retval = zfcp_fsf_req_send(req);
1958 if (retval) {
1959 zfcp_fsf_req_free(req);
1960 erp_action->fsf_req_id = 0;
1961 }
1962 out:
1963 spin_unlock_irq(&qdio->req_q_lock);
1964 return retval;
1965 }
1966
1967 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1968 {
1969 lat_rec->sum += lat;
1970 lat_rec->min = min(lat_rec->min, lat);
1971 lat_rec->max = max(lat_rec->max, lat);
1972 }
1973
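/*
 * Build the blktrace driver-data record for a completed SCSI request.
 * If the adapter provides measurement data, the channel and fabric
 * latencies are scaled by the adapter's timer tick value and also
 * folded into the per-device latency statistics.
 */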
1974 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1975 {
1976 struct fsf_qual_latency_info *lat_in;
1977 struct latency_cont *lat = NULL;
1978 struct zfcp_scsi_dev *zfcp_sdev;
1979 struct zfcp_blk_drv_data blktrc;
1980 int ticks = req->adapter->timer_ticks;
1981
1982 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1983
1984 blktrc.flags = 0;
1985 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1986 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1987 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1988 blktrc.inb_usage = 0;
1989 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
1990
1991 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1992 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1993 zfcp_sdev = sdev_to_zfcp(scsi->device);
1994 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1995 blktrc.channel_lat = lat_in->channel_lat * ticks;
1996 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1997
1998 switch (req->qtcb->bottom.io.data_direction) {
1999 case FSF_DATADIR_DIF_READ_STRIP:
2000 case FSF_DATADIR_DIF_READ_CONVERT:
2001 case FSF_DATADIR_READ:
2002 lat = &zfcp_sdev->latencies.read;
2003 break;
2004 case FSF_DATADIR_DIF_WRITE_INSERT:
2005 case FSF_DATADIR_DIF_WRITE_CONVERT:
2006 case FSF_DATADIR_WRITE:
2007 lat = &zfcp_sdev->latencies.write;
2008 break;
2009 case FSF_DATADIR_CMND:
2010 lat = &zfcp_sdev->latencies.cmd;
2011 break;
2012 }
2013
2014 if (lat) {
2015 spin_lock(&zfcp_sdev->latencies.lock);
2016 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2017 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2018 lat->counter++;
2019 spin_unlock(&zfcp_sdev->latencies.lock);
2020 }
2021 }
2022
2023 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2024 sizeof(blktrc));
2025 }
2026
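/*
 * Evaluate the FSF status fields that FCP command and FCP task management
 * responses have in common, triggering adapter/port/LUN recovery and
 * flagging the request as failed for the error cases.
 */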
2027 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2028 {
2029 struct scsi_cmnd *scmnd = req->data;
2030 struct scsi_device *sdev = scmnd->device;
2031 struct zfcp_scsi_dev *zfcp_sdev;
2032 struct fsf_qtcb_header *header = &req->qtcb->header;
2033
2034 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2035 return;
2036
2037 zfcp_sdev = sdev_to_zfcp(sdev);
2038
2039 switch (header->fsf_status) {
2040 case FSF_HANDLE_MISMATCH:
2041 case FSF_PORT_HANDLE_NOT_VALID:
2042 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2043 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2044 break;
2045 case FSF_FCPLUN_NOT_VALID:
2046 case FSF_LUN_HANDLE_NOT_VALID:
2047 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2048 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2049 break;
2050 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2051 zfcp_fsf_class_not_supp(req);
2052 break;
2053 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2054 dev_err(&req->adapter->ccw_device->dev,
2055 "Incorrect direction %d, LUN 0x%016Lx on port "
2056 "0x%016Lx closed\n",
2057 req->qtcb->bottom.io.data_direction,
2058 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2059 (unsigned long long)zfcp_sdev->port->wwpn);
2060 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2061 "fssfch3");
2062 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2063 break;
2064 case FSF_CMND_LENGTH_NOT_VALID:
2065 dev_err(&req->adapter->ccw_device->dev,
2066 "Incorrect CDB length %d, LUN 0x%016Lx on "
2067 "port 0x%016Lx closed\n",
2068 req->qtcb->bottom.io.fcp_cmnd_length,
2069 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2070 (unsigned long long)zfcp_sdev->port->wwpn);
2071 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2072 "fssfch4");
2073 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2074 break;
2075 case FSF_PORT_BOXED:
2076 zfcp_erp_set_port_status(zfcp_sdev->port,
2077 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2078 zfcp_erp_port_reopen(zfcp_sdev->port,
2079 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2080 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2081 break;
2082 case FSF_LUN_BOXED:
2083 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2084 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2085 "fssfch6");
2086 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2087 break;
2088 case FSF_ADAPTER_STATUS_AVAILABLE:
2089 if (header->fsf_status_qual.word[0] ==
2090 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2091 zfcp_fc_test_link(zfcp_sdev->port);
2092 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2093 break;
2094 }
2095 }
2096
2097 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2098 {
2099 struct scsi_cmnd *scpnt;
2100 struct fcp_resp_with_ext *fcp_rsp;
2101 unsigned long flags;
2102
2103 read_lock_irqsave(&req->adapter->abort_lock, flags);
2104
2105 scpnt = req->data;
2106 if (unlikely(!scpnt)) {
2107 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2108 return;
2109 }
2110
2111 zfcp_fsf_fcp_handler_common(req);
2112
2113 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2114 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2115 goto skip_fsfstatus;
2116 }
2117
2118 switch (req->qtcb->header.fsf_status) {
2119 case FSF_INCONSISTENT_PROT_DATA:
2120 case FSF_INVALID_PROT_PARM:
2121 set_host_byte(scpnt, DID_ERROR);
2122 goto skip_fsfstatus;
2123 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2124 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2125 goto skip_fsfstatus;
2126 case FSF_APP_TAG_CHECK_FAILURE:
2127 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2128 goto skip_fsfstatus;
2129 case FSF_REF_TAG_CHECK_FAILURE:
2130 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2131 goto skip_fsfstatus;
2132 }
2133 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2134 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2135
2136 skip_fsfstatus:
2137 zfcp_fsf_req_trace(req, scpnt);
2138 zfcp_dbf_scsi_result(scpnt, req);
2139
2140 scpnt->host_scribble = NULL;
2141 (scpnt->scsi_done) (scpnt);
2142 /*
2143 * We must hold this lock until scsi_done has been called.
2144 * Otherwise we might call scsi_done after an abort for this
2145 * command has already completed.
2146 * Note: scsi_done must not block!
2147 */
2148 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2149 }
2150
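/*
 * Derive the FSF data direction from the SCSI data direction and the
 * DIF/DIX protection operation of the command; bidirectional transfers
 * and unknown protection operations are rejected with -EINVAL.
 */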
2151 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2152 {
2153 switch (scsi_get_prot_op(scsi_cmnd)) {
2154 case SCSI_PROT_NORMAL:
2155 switch (scsi_cmnd->sc_data_direction) {
2156 case DMA_NONE:
2157 *data_dir = FSF_DATADIR_CMND;
2158 break;
2159 case DMA_FROM_DEVICE:
2160 *data_dir = FSF_DATADIR_READ;
2161 break;
2162 case DMA_TO_DEVICE:
2163 *data_dir = FSF_DATADIR_WRITE;
2164 break;
2165 case DMA_BIDIRECTIONAL:
2166 return -EINVAL;
2167 }
2168 break;
2169
2170 case SCSI_PROT_READ_STRIP:
2171 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2172 break;
2173 case SCSI_PROT_WRITE_INSERT:
2174 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2175 break;
2176 case SCSI_PROT_READ_PASS:
2177 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2178 break;
2179 case SCSI_PROT_WRITE_PASS:
2180 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2181 break;
2182 default:
2183 return -EINVAL;
2184 }
2185
2186 return 0;
2187 }
2188
2189 /**
2190 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2191 * @scsi_cmnd: scsi command to be sent
2192 */
2193 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2194 {
2195 struct zfcp_fsf_req *req;
2196 struct fcp_cmnd *fcp_cmnd;
2197 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2198 int retval = -EIO;
2199 struct scsi_device *sdev = scsi_cmnd->device;
2200 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2201 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2202 struct zfcp_qdio *qdio = adapter->qdio;
2203 struct fsf_qtcb_bottom_io *io;
2204 unsigned long flags;
2205
2206 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2207 ZFCP_STATUS_COMMON_UNBLOCKED)))
2208 return -EBUSY;
2209
2210 spin_lock_irqsave(&qdio->req_q_lock, flags);
2211 if (atomic_read(&qdio->req_q_free) <= 0) {
2212 atomic_inc(&qdio->req_q_full);
2213 goto out;
2214 }
2215
2216 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2217 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2218
2219 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2220 sbtype, adapter->pool.scsi_req);
2221
2222 if (IS_ERR(req)) {
2223 retval = PTR_ERR(req);
2224 goto out;
2225 }
2226
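	/* remember the request ID so the command can be found again, e.g. on abort */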
2227 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2228
2229 io = &req->qtcb->bottom.io;
2230 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2231 req->data = scsi_cmnd;
2232 req->handler = zfcp_fsf_fcp_cmnd_handler;
2233 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2234 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2235 io->service_class = FSF_CLASS_3;
2236 io->fcp_cmnd_length = FCP_CMND_LEN;
2237
2238 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2239 io->data_block_length = scsi_cmnd->device->sector_size;
2240 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2241 }
2242
2243 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2244 goto failed_scsi_cmnd;
2245
2246 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2247 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2248
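	/* if the command carries DIX protection data, map its SG list as well */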
2249 if (scsi_prot_sg_count(scsi_cmnd)) {
2250 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2251 scsi_prot_sg_count(scsi_cmnd));
2252 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2253 scsi_prot_sglist(scsi_cmnd));
2254 if (retval)
2255 goto failed_scsi_cmnd;
2256 io->prot_data_length = zfcp_qdio_real_bytes(
2257 scsi_prot_sglist(scsi_cmnd));
2258 }
2259
2260 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2261 scsi_sglist(scsi_cmnd));
2262 if (unlikely(retval))
2263 goto failed_scsi_cmnd;
2264
2265 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2266 if (zfcp_adapter_multi_buffer_active(adapter))
2267 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2268
2269 retval = zfcp_fsf_req_send(req);
2270 if (unlikely(retval))
2271 goto failed_scsi_cmnd;
2272
2273 goto out;
2274
2275 failed_scsi_cmnd:
2276 zfcp_fsf_req_free(req);
2277 scsi_cmnd->host_scribble = NULL;
2278 out:
2279 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2280 return retval;
2281 }
2282
2283 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2284 {
2285 struct fcp_resp_with_ext *fcp_rsp;
2286 struct fcp_resp_rsp_info *rsp_info;
2287
2288 zfcp_fsf_fcp_handler_common(req);
2289
2290 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
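	/* the response-info block immediately follows the FCP response */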
2291 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2292
2293 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2294 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2295 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2296 }
2297
2298 /**
2299 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2300 * @scmnd: SCSI command to send the task management command for
2301 * @tm_flags: unsigned byte for task management flags
2302 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2303 */
2304 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2305 u8 tm_flags)
2306 {
2307 struct zfcp_fsf_req *req = NULL;
2308 struct fcp_cmnd *fcp_cmnd;
2309 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2310 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2311
2312 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2313 ZFCP_STATUS_COMMON_UNBLOCKED)))
2314 return NULL;
2315
2316 spin_lock_irq(&qdio->req_q_lock);
2317 if (zfcp_qdio_sbal_get(qdio))
2318 goto out;
2319
2320 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2321 SBAL_SFLAGS0_TYPE_WRITE,
2322 qdio->adapter->pool.scsi_req);
2323
2324 if (IS_ERR(req)) {
2325 req = NULL;
2326 goto out;
2327 }
2328
2329 req->data = scmnd;
2330 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2331 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2332 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2333 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2334 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2335 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2336
2337 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2338
2339 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2340 zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2341
2342 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2343 if (!zfcp_fsf_req_send(req))
2344 goto out;
2345
2346 zfcp_fsf_req_free(req);
2347 req = NULL;
2348 out:
2349 spin_unlock_irq(&qdio->req_q_lock);
2350 return req;
2351 }
2352
2353 /**
2354 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2355 * @qdio: pointer to struct zfcp_qdio
2356 * @sbal_idx: response queue index of SBAL to be processed
2357 */
2358 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2359 {
2360 struct zfcp_adapter *adapter = qdio->adapter;
2361 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2362 struct qdio_buffer_element *sbale;
2363 struct zfcp_fsf_req *fsf_req;
2364 unsigned long req_id;
2365 int idx;
2366
2367 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2368
2369 sbale = &sbal->element[idx];
2370 req_id = (unsigned long) sbale->addr;
2371 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2372
2373 if (!fsf_req) {
2374 /*
2375 * An unknown request ID indicates potential memory corruption,
2376 * so we must stop the machine immediately.
2377 */
2378 zfcp_qdio_siosl(adapter);
2379 panic("error: unknown req_id (%lx) on adapter %s.\n",
2380 req_id, dev_name(&adapter->ccw_device->dev));
2381 }
2382
2383 fsf_req->qdio_req.sbal_response = sbal_idx;
2384 zfcp_fsf_req_complete(fsf_req);
2385
2386 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2387 break;
2388 }
2389 }