2 * Copyright (C) 2005 - 2010 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
12 * Contact Information:
13 * linux-drivers@serverengines.com
16 * 209 N. Fair Oaks Ave
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
/* Module tunables; exposed (in part) via module_param() below. */
static unsigned int be_iopoll_budget = 10;	/* completions per iopoll pass */
static unsigned int be_max_phys_size = 64;	/* max contiguous alloc, in KB */
static unsigned int enable_msix = 1;		/* use MSI-X when available */
/* checkpatch: statics live in .bss; do not initialise them to 0 */
static unsigned int gcrashmode;
static unsigned int num_hba;
47 MODULE_DEVICE_TABLE(pci
, beiscsi_pci_id_table
);
48 MODULE_DESCRIPTION(DRV_DESC
" " BUILD_STR
);
49 MODULE_AUTHOR("ServerEngines Corporation");
50 MODULE_LICENSE("GPL");
51 module_param(be_iopoll_budget
, int, 0);
52 module_param(enable_msix
, int, 0);
53 module_param(be_max_phys_size
, uint
, S_IRUGO
);
54 MODULE_PARM_DESC(be_max_phys_size
, "Maximum Size (In Kilobytes) of physically"
55 "contiguous memory that can be allocated."
58 static int beiscsi_slave_configure(struct scsi_device
*sdev
)
60 blk_queue_max_segment_size(sdev
->request_queue
, 65536);
64 static int beiscsi_eh_abort(struct scsi_cmnd
*sc
)
66 struct iscsi_cls_session
*cls_session
;
67 struct iscsi_task
*aborted_task
= (struct iscsi_task
*)sc
->SCp
.ptr
;
68 struct beiscsi_io_task
*aborted_io_task
;
69 struct iscsi_conn
*conn
;
70 struct beiscsi_conn
*beiscsi_conn
;
71 struct beiscsi_hba
*phba
;
72 struct iscsi_session
*session
;
73 struct invalidate_command_table
*inv_tbl
;
74 struct be_dma_mem nonemb_cmd
;
75 unsigned int cid
, tag
, num_invalidate
;
77 cls_session
= starget_to_session(scsi_target(sc
->device
));
78 session
= cls_session
->dd_data
;
80 spin_lock_bh(&session
->lock
);
81 if (!aborted_task
|| !aborted_task
->sc
) {
83 spin_unlock_bh(&session
->lock
);
87 aborted_io_task
= aborted_task
->dd_data
;
88 if (!aborted_io_task
->scsi_cmnd
) {
89 /* raced or invalid command */
90 spin_unlock_bh(&session
->lock
);
93 spin_unlock_bh(&session
->lock
);
94 conn
= aborted_task
->conn
;
95 beiscsi_conn
= conn
->dd_data
;
96 phba
= beiscsi_conn
->phba
;
99 cid
= beiscsi_conn
->beiscsi_conn_cid
;
100 inv_tbl
= phba
->inv_tbl
;
101 memset(inv_tbl
, 0x0, sizeof(*inv_tbl
));
103 inv_tbl
->icd
= aborted_io_task
->psgl_handle
->sgl_index
;
105 nonemb_cmd
.va
= pci_alloc_consistent(phba
->ctrl
.pdev
,
106 sizeof(struct invalidate_commands_params_in
),
108 if (nonemb_cmd
.va
== NULL
) {
110 "Failed to allocate memory for"
111 "mgmt_invalidate_icds\n");
114 nonemb_cmd
.size
= sizeof(struct invalidate_commands_params_in
);
116 tag
= mgmt_invalidate_icds(phba
, inv_tbl
, num_invalidate
,
119 shost_printk(KERN_WARNING
, phba
->shost
,
120 "mgmt_invalidate_icds could not be"
122 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
123 nonemb_cmd
.va
, nonemb_cmd
.dma
);
127 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
128 phba
->ctrl
.mcc_numtag
[tag
]);
129 free_mcc_tag(&phba
->ctrl
, tag
);
131 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
132 nonemb_cmd
.va
, nonemb_cmd
.dma
);
133 return iscsi_eh_abort(sc
);
136 static int beiscsi_eh_device_reset(struct scsi_cmnd
*sc
)
138 struct iscsi_task
*abrt_task
;
139 struct beiscsi_io_task
*abrt_io_task
;
140 struct iscsi_conn
*conn
;
141 struct beiscsi_conn
*beiscsi_conn
;
142 struct beiscsi_hba
*phba
;
143 struct iscsi_session
*session
;
144 struct iscsi_cls_session
*cls_session
;
145 struct invalidate_command_table
*inv_tbl
;
146 struct be_dma_mem nonemb_cmd
;
147 unsigned int cid
, tag
, i
, num_invalidate
;
150 /* invalidate iocbs */
151 cls_session
= starget_to_session(scsi_target(sc
->device
));
152 session
= cls_session
->dd_data
;
153 spin_lock_bh(&session
->lock
);
154 if (!session
->leadconn
|| session
->state
!= ISCSI_STATE_LOGGED_IN
)
157 conn
= session
->leadconn
;
158 beiscsi_conn
= conn
->dd_data
;
159 phba
= beiscsi_conn
->phba
;
160 cid
= beiscsi_conn
->beiscsi_conn_cid
;
161 inv_tbl
= phba
->inv_tbl
;
162 memset(inv_tbl
, 0x0, sizeof(*inv_tbl
) * BE2_CMDS_PER_CXN
);
164 for (i
= 0; i
< conn
->session
->cmds_max
; i
++) {
165 abrt_task
= conn
->session
->cmds
[i
];
166 abrt_io_task
= abrt_task
->dd_data
;
167 if (!abrt_task
->sc
|| abrt_task
->state
== ISCSI_TASK_FREE
)
170 if (abrt_task
->sc
->device
->lun
!= abrt_task
->sc
->device
->lun
)
174 inv_tbl
->icd
= abrt_io_task
->psgl_handle
->sgl_index
;
178 spin_unlock_bh(&session
->lock
);
179 inv_tbl
= phba
->inv_tbl
;
181 nonemb_cmd
.va
= pci_alloc_consistent(phba
->ctrl
.pdev
,
182 sizeof(struct invalidate_commands_params_in
),
184 if (nonemb_cmd
.va
== NULL
) {
186 "Failed to allocate memory for"
187 "mgmt_invalidate_icds\n");
190 nonemb_cmd
.size
= sizeof(struct invalidate_commands_params_in
);
191 memset(nonemb_cmd
.va
, 0, nonemb_cmd
.size
);
192 tag
= mgmt_invalidate_icds(phba
, inv_tbl
, num_invalidate
,
195 shost_printk(KERN_WARNING
, phba
->shost
,
196 "mgmt_invalidate_icds could not be"
198 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
199 nonemb_cmd
.va
, nonemb_cmd
.dma
);
202 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
203 phba
->ctrl
.mcc_numtag
[tag
]);
204 free_mcc_tag(&phba
->ctrl
, tag
);
206 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
207 nonemb_cmd
.va
, nonemb_cmd
.dma
);
208 return iscsi_eh_device_reset(sc
);
210 spin_unlock_bh(&session
->lock
);
214 /*------------------- PCI Driver operations and data ----------------- */
215 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table
) = {
216 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID1
) },
217 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID2
) },
218 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID1
) },
219 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID2
) },
220 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID3
) },
223 MODULE_DEVICE_TABLE(pci
, beiscsi_pci_id_table
);
225 static struct scsi_host_template beiscsi_sht
= {
226 .module
= THIS_MODULE
,
227 .name
= "ServerEngines 10Gbe open-iscsi Initiator Driver",
228 .proc_name
= DRV_NAME
,
229 .queuecommand
= iscsi_queuecommand
,
230 .change_queue_depth
= iscsi_change_queue_depth
,
231 .slave_configure
= beiscsi_slave_configure
,
232 .target_alloc
= iscsi_target_alloc
,
233 .eh_abort_handler
= beiscsi_eh_abort
,
234 .eh_device_reset_handler
= beiscsi_eh_device_reset
,
235 .eh_target_reset_handler
= iscsi_eh_session_reset
,
236 .sg_tablesize
= BEISCSI_SGLIST_ELEMENTS
,
237 .can_queue
= BE2_IO_DEPTH
,
239 .max_sectors
= BEISCSI_MAX_SECTORS
,
240 .cmd_per_lun
= BEISCSI_CMD_PER_LUN
,
241 .use_clustering
= ENABLE_CLUSTERING
,
244 static struct scsi_transport_template
*beiscsi_scsi_transport
;
246 static struct beiscsi_hba
*beiscsi_hba_alloc(struct pci_dev
*pcidev
)
248 struct beiscsi_hba
*phba
;
249 struct Scsi_Host
*shost
;
251 shost
= iscsi_host_alloc(&beiscsi_sht
, sizeof(*phba
), 0);
253 dev_err(&pcidev
->dev
, "beiscsi_hba_alloc -"
254 "iscsi_host_alloc failed\n");
257 shost
->dma_boundary
= pcidev
->dma_mask
;
258 shost
->max_id
= BE2_MAX_SESSIONS
;
259 shost
->max_channel
= 0;
260 shost
->max_cmd_len
= BEISCSI_MAX_CMD_LEN
;
261 shost
->max_lun
= BEISCSI_NUM_MAX_LUN
;
262 shost
->transportt
= beiscsi_scsi_transport
;
263 phba
= iscsi_host_priv(shost
);
264 memset(phba
, 0, sizeof(*phba
));
266 phba
->pcidev
= pci_dev_get(pcidev
);
267 pci_set_drvdata(pcidev
, phba
);
269 if (iscsi_host_add(shost
, &phba
->pcidev
->dev
))
274 pci_dev_put(phba
->pcidev
);
275 iscsi_host_free(phba
->shost
);
279 static void beiscsi_unmap_pci_function(struct beiscsi_hba
*phba
)
282 iounmap(phba
->csr_va
);
286 iounmap(phba
->db_va
);
290 iounmap(phba
->pci_va
);
295 static int beiscsi_map_pci_bars(struct beiscsi_hba
*phba
,
296 struct pci_dev
*pcidev
)
301 addr
= ioremap_nocache(pci_resource_start(pcidev
, 2),
302 pci_resource_len(pcidev
, 2));
305 phba
->ctrl
.csr
= addr
;
307 phba
->csr_pa
.u
.a64
.address
= pci_resource_start(pcidev
, 2);
309 addr
= ioremap_nocache(pci_resource_start(pcidev
, 4), 128 * 1024);
312 phba
->ctrl
.db
= addr
;
314 phba
->db_pa
.u
.a64
.address
= pci_resource_start(pcidev
, 4);
316 if (phba
->generation
== BE_GEN2
)
321 addr
= ioremap_nocache(pci_resource_start(pcidev
, pcicfg_reg
),
322 pci_resource_len(pcidev
, pcicfg_reg
));
326 phba
->ctrl
.pcicfg
= addr
;
328 phba
->pci_pa
.u
.a64
.address
= pci_resource_start(pcidev
, pcicfg_reg
);
332 beiscsi_unmap_pci_function(phba
);
336 static int beiscsi_enable_pci(struct pci_dev
*pcidev
)
340 ret
= pci_enable_device(pcidev
);
342 dev_err(&pcidev
->dev
, "beiscsi_enable_pci - enable device "
343 "failed. Returning -ENODEV\n");
347 pci_set_master(pcidev
);
348 if (pci_set_consistent_dma_mask(pcidev
, DMA_BIT_MASK(64))) {
349 ret
= pci_set_consistent_dma_mask(pcidev
, DMA_BIT_MASK(32));
351 dev_err(&pcidev
->dev
, "Could not set PCI DMA Mask\n");
352 pci_disable_device(pcidev
);
359 static int be_ctrl_init(struct beiscsi_hba
*phba
, struct pci_dev
*pdev
)
361 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
362 struct be_dma_mem
*mbox_mem_alloc
= &ctrl
->mbox_mem_alloced
;
363 struct be_dma_mem
*mbox_mem_align
= &ctrl
->mbox_mem
;
367 status
= beiscsi_map_pci_bars(phba
, pdev
);
370 mbox_mem_alloc
->size
= sizeof(struct be_mcc_mailbox
) + 16;
371 mbox_mem_alloc
->va
= pci_alloc_consistent(pdev
,
372 mbox_mem_alloc
->size
,
373 &mbox_mem_alloc
->dma
);
374 if (!mbox_mem_alloc
->va
) {
375 beiscsi_unmap_pci_function(phba
);
380 mbox_mem_align
->size
= sizeof(struct be_mcc_mailbox
);
381 mbox_mem_align
->va
= PTR_ALIGN(mbox_mem_alloc
->va
, 16);
382 mbox_mem_align
->dma
= PTR_ALIGN(mbox_mem_alloc
->dma
, 16);
383 memset(mbox_mem_align
->va
, 0, sizeof(struct be_mcc_mailbox
));
384 spin_lock_init(&ctrl
->mbox_lock
);
385 spin_lock_init(&phba
->ctrl
.mcc_lock
);
386 spin_lock_init(&phba
->ctrl
.mcc_cq_lock
);
391 static void beiscsi_get_params(struct beiscsi_hba
*phba
)
393 phba
->params
.ios_per_ctrl
= (phba
->fw_config
.iscsi_icd_count
394 - (phba
->fw_config
.iscsi_cid_count
397 phba
->params
.cxns_per_ctrl
= phba
->fw_config
.iscsi_cid_count
;
398 phba
->params
.asyncpdus_per_ctrl
= phba
->fw_config
.iscsi_cid_count
* 2;
399 phba
->params
.icds_per_ctrl
= phba
->fw_config
.iscsi_icd_count
;;
400 phba
->params
.num_sge_per_io
= BE2_SGE
;
401 phba
->params
.defpdu_hdr_sz
= BE2_DEFPDU_HDR_SZ
;
402 phba
->params
.defpdu_data_sz
= BE2_DEFPDU_DATA_SZ
;
403 phba
->params
.eq_timer
= 64;
404 phba
->params
.num_eq_entries
=
405 (((BE2_CMDS_PER_CXN
* 2 + phba
->fw_config
.iscsi_cid_count
* 2
406 + BE2_TMFS
) / 512) + 1) * 512;
407 phba
->params
.num_eq_entries
= (phba
->params
.num_eq_entries
< 1024)
408 ? 1024 : phba
->params
.num_eq_entries
;
409 SE_DEBUG(DBG_LVL_8
, "phba->params.num_eq_entries=%d\n",
410 phba
->params
.num_eq_entries
);
411 phba
->params
.num_cq_entries
=
412 (((BE2_CMDS_PER_CXN
* 2 + phba
->fw_config
.iscsi_cid_count
* 2
413 + BE2_TMFS
) / 512) + 1) * 512;
414 phba
->params
.wrbs_per_cxn
= 256;
417 static void hwi_ring_eq_db(struct beiscsi_hba
*phba
,
418 unsigned int id
, unsigned int clr_interrupt
,
419 unsigned int num_processed
,
420 unsigned char rearm
, unsigned char event
)
423 val
|= id
& DB_EQ_RING_ID_MASK
;
425 val
|= 1 << DB_EQ_REARM_SHIFT
;
427 val
|= 1 << DB_EQ_CLR_SHIFT
;
429 val
|= 1 << DB_EQ_EVNT_SHIFT
;
430 val
|= num_processed
<< DB_EQ_NUM_POPPED_SHIFT
;
431 iowrite32(val
, phba
->db_va
+ DB_EQ_OFFSET
);
435 * be_isr_mcc - The isr routine of the driver.
437 * @dev_id: Pointer to host adapter structure
439 static irqreturn_t
be_isr_mcc(int irq
, void *dev_id
)
441 struct beiscsi_hba
*phba
;
442 struct be_eq_entry
*eqe
= NULL
;
443 struct be_queue_info
*eq
;
444 struct be_queue_info
*mcc
;
445 unsigned int num_eq_processed
;
446 struct be_eq_obj
*pbe_eq
;
452 mcc
= &phba
->ctrl
.mcc_obj
.cq
;
453 eqe
= queue_tail_node(eq
);
455 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
457 num_eq_processed
= 0;
459 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
461 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
463 EQE_RESID_MASK
) >> 16) == mcc
->id
) {
464 spin_lock_irqsave(&phba
->isr_lock
, flags
);
465 phba
->todo_mcc_cq
= 1;
466 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
468 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
470 eqe
= queue_tail_node(eq
);
473 if (phba
->todo_mcc_cq
)
474 queue_work(phba
->wq
, &phba
->work_cqs
);
475 if (num_eq_processed
)
476 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 1, 1);
482 * be_isr_msix - The isr routine of the driver.
484 * @dev_id: Pointer to host adapter structure
486 static irqreturn_t
be_isr_msix(int irq
, void *dev_id
)
488 struct beiscsi_hba
*phba
;
489 struct be_eq_entry
*eqe
= NULL
;
490 struct be_queue_info
*eq
;
491 struct be_queue_info
*cq
;
492 unsigned int num_eq_processed
;
493 struct be_eq_obj
*pbe_eq
;
499 eqe
= queue_tail_node(eq
);
501 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
504 num_eq_processed
= 0;
505 if (blk_iopoll_enabled
) {
506 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
508 if (!blk_iopoll_sched_prep(&pbe_eq
->iopoll
))
509 blk_iopoll_sched(&pbe_eq
->iopoll
);
511 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
513 eqe
= queue_tail_node(eq
);
516 if (num_eq_processed
)
517 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 0, 1);
521 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
523 spin_lock_irqsave(&phba
->isr_lock
, flags
);
525 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
526 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
528 eqe
= queue_tail_node(eq
);
532 queue_work(phba
->wq
, &phba
->work_cqs
);
534 if (num_eq_processed
)
535 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 1, 1);
542 * be_isr - The isr routine of the driver.
544 * @dev_id: Pointer to host adapter structure
546 static irqreturn_t
be_isr(int irq
, void *dev_id
)
548 struct beiscsi_hba
*phba
;
549 struct hwi_controller
*phwi_ctrlr
;
550 struct hwi_context_memory
*phwi_context
;
551 struct be_eq_entry
*eqe
= NULL
;
552 struct be_queue_info
*eq
;
553 struct be_queue_info
*cq
;
554 struct be_queue_info
*mcc
;
555 unsigned long flags
, index
;
556 unsigned int num_mcceq_processed
, num_ioeq_processed
;
557 struct be_ctrl_info
*ctrl
;
558 struct be_eq_obj
*pbe_eq
;
563 isr
= ioread32(ctrl
->csr
+ CEV_ISR0_OFFSET
+
564 (PCI_FUNC(ctrl
->pdev
->devfn
) * CEV_ISR_SIZE
));
568 phwi_ctrlr
= phba
->phwi_ctrlr
;
569 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
570 pbe_eq
= &phwi_context
->be_eq
[0];
572 eq
= &phwi_context
->be_eq
[0].q
;
573 mcc
= &phba
->ctrl
.mcc_obj
.cq
;
575 eqe
= queue_tail_node(eq
);
577 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
579 num_ioeq_processed
= 0;
580 num_mcceq_processed
= 0;
581 if (blk_iopoll_enabled
) {
582 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
584 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
586 EQE_RESID_MASK
) >> 16) == mcc
->id
) {
587 spin_lock_irqsave(&phba
->isr_lock
, flags
);
588 phba
->todo_mcc_cq
= 1;
589 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
590 num_mcceq_processed
++;
592 if (!blk_iopoll_sched_prep(&pbe_eq
->iopoll
))
593 blk_iopoll_sched(&pbe_eq
->iopoll
);
594 num_ioeq_processed
++;
596 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
598 eqe
= queue_tail_node(eq
);
600 if (num_ioeq_processed
|| num_mcceq_processed
) {
601 if (phba
->todo_mcc_cq
)
602 queue_work(phba
->wq
, &phba
->work_cqs
);
604 if ((num_mcceq_processed
) && (!num_ioeq_processed
))
605 hwi_ring_eq_db(phba
, eq
->id
, 0,
606 (num_ioeq_processed
+
607 num_mcceq_processed
) , 1, 1);
609 hwi_ring_eq_db(phba
, eq
->id
, 0,
610 (num_ioeq_processed
+
611 num_mcceq_processed
), 0, 1);
617 cq
= &phwi_context
->be_cq
[0];
618 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
621 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
623 EQE_RESID_MASK
) >> 16) != cq
->id
) {
624 spin_lock_irqsave(&phba
->isr_lock
, flags
);
625 phba
->todo_mcc_cq
= 1;
626 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
628 spin_lock_irqsave(&phba
->isr_lock
, flags
);
630 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
632 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
634 eqe
= queue_tail_node(eq
);
635 num_ioeq_processed
++;
637 if (phba
->todo_cq
|| phba
->todo_mcc_cq
)
638 queue_work(phba
->wq
, &phba
->work_cqs
);
640 if (num_ioeq_processed
) {
641 hwi_ring_eq_db(phba
, eq
->id
, 0,
642 num_ioeq_processed
, 1, 1);
649 static int beiscsi_init_irqs(struct beiscsi_hba
*phba
)
651 struct pci_dev
*pcidev
= phba
->pcidev
;
652 struct hwi_controller
*phwi_ctrlr
;
653 struct hwi_context_memory
*phwi_context
;
654 int ret
, msix_vec
, i
, j
;
657 phwi_ctrlr
= phba
->phwi_ctrlr
;
658 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
660 if (phba
->msix_enabled
) {
661 for (i
= 0; i
< phba
->num_cpus
; i
++) {
662 sprintf(desc
, "beiscsi_msix_%04x", i
);
663 msix_vec
= phba
->msix_entries
[i
].vector
;
664 ret
= request_irq(msix_vec
, be_isr_msix
, 0, desc
,
665 &phwi_context
->be_eq
[i
]);
667 shost_printk(KERN_ERR
, phba
->shost
,
668 "beiscsi_init_irqs-Failed to"
669 "register msix for i = %d\n", i
);
675 msix_vec
= phba
->msix_entries
[i
].vector
;
676 ret
= request_irq(msix_vec
, be_isr_mcc
, 0, "beiscsi_msix_mcc",
677 &phwi_context
->be_eq
[i
]);
679 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_init_irqs-"
680 "Failed to register beiscsi_msix_mcc\n");
686 ret
= request_irq(pcidev
->irq
, be_isr
, IRQF_SHARED
,
689 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_init_irqs-"
690 "Failed to register irq\\n");
696 for (j
= i
- 1; j
== 0; j
++)
697 free_irq(msix_vec
, &phwi_context
->be_eq
[j
]);
701 static void hwi_ring_cq_db(struct beiscsi_hba
*phba
,
702 unsigned int id
, unsigned int num_processed
,
703 unsigned char rearm
, unsigned char event
)
706 val
|= id
& DB_CQ_RING_ID_MASK
;
708 val
|= 1 << DB_CQ_REARM_SHIFT
;
709 val
|= num_processed
<< DB_CQ_NUM_POPPED_SHIFT
;
710 iowrite32(val
, phba
->db_va
+ DB_CQ_OFFSET
);
714 beiscsi_process_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
715 struct beiscsi_hba
*phba
,
717 struct pdu_base
*ppdu
,
718 unsigned long pdu_len
,
719 void *pbuffer
, unsigned long buf_len
)
721 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
722 struct iscsi_session
*session
= conn
->session
;
723 struct iscsi_task
*task
;
724 struct beiscsi_io_task
*io_task
;
725 struct iscsi_hdr
*login_hdr
;
727 switch (ppdu
->dw
[offsetof(struct amap_pdu_base
, opcode
) / 32] &
728 PDUBASE_OPCODE_MASK
) {
729 case ISCSI_OP_NOOP_IN
:
733 case ISCSI_OP_ASYNC_EVENT
:
735 case ISCSI_OP_REJECT
:
737 WARN_ON(!(buf_len
== 48));
738 SE_DEBUG(DBG_LVL_1
, "In ISCSI_OP_REJECT\n");
740 case ISCSI_OP_LOGIN_RSP
:
741 case ISCSI_OP_TEXT_RSP
:
742 task
= conn
->login_task
;
743 io_task
= task
->dd_data
;
744 login_hdr
= (struct iscsi_hdr
*)ppdu
;
745 login_hdr
->itt
= io_task
->libiscsi_itt
;
748 shost_printk(KERN_WARNING
, phba
->shost
,
749 "Unrecognized opcode 0x%x in async msg\n",
751 dw
[offsetof(struct amap_pdu_base
, opcode
) / 32]
752 & PDUBASE_OPCODE_MASK
));
756 spin_lock_bh(&session
->lock
);
757 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)ppdu
, pbuffer
, buf_len
);
758 spin_unlock_bh(&session
->lock
);
762 static struct sgl_handle
*alloc_io_sgl_handle(struct beiscsi_hba
*phba
)
764 struct sgl_handle
*psgl_handle
;
766 if (phba
->io_sgl_hndl_avbl
) {
768 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
769 phba
->io_sgl_alloc_index
);
770 psgl_handle
= phba
->io_sgl_hndl_base
[phba
->
772 phba
->io_sgl_hndl_base
[phba
->io_sgl_alloc_index
] = NULL
;
773 phba
->io_sgl_hndl_avbl
--;
774 if (phba
->io_sgl_alloc_index
== (phba
->params
.
776 phba
->io_sgl_alloc_index
= 0;
778 phba
->io_sgl_alloc_index
++;
785 free_io_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
787 SE_DEBUG(DBG_LVL_8
, "In free_,io_sgl_free_index=%d\n",
788 phba
->io_sgl_free_index
);
789 if (phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]) {
791 * this can happen if clean_task is called on a task that
792 * failed in xmit_task or alloc_pdu.
795 "Double Free in IO SGL io_sgl_free_index=%d,"
796 "value there=%p\n", phba
->io_sgl_free_index
,
797 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]);
800 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
] = psgl_handle
;
801 phba
->io_sgl_hndl_avbl
++;
802 if (phba
->io_sgl_free_index
== (phba
->params
.ios_per_ctrl
- 1))
803 phba
->io_sgl_free_index
= 0;
805 phba
->io_sgl_free_index
++;
809 * alloc_wrb_handle - To allocate a wrb handle
810 * @phba: The hba pointer
811 * @cid: The cid to use for allocation
813 * This happens under session_lock until submission to chip
815 struct wrb_handle
*alloc_wrb_handle(struct beiscsi_hba
*phba
, unsigned int cid
)
817 struct hwi_wrb_context
*pwrb_context
;
818 struct hwi_controller
*phwi_ctrlr
;
819 struct wrb_handle
*pwrb_handle
, *pwrb_handle_tmp
;
821 phwi_ctrlr
= phba
->phwi_ctrlr
;
822 pwrb_context
= &phwi_ctrlr
->wrb_context
[cid
];
823 if (pwrb_context
->wrb_handles_available
>= 2) {
824 pwrb_handle
= pwrb_context
->pwrb_handle_base
[
825 pwrb_context
->alloc_index
];
826 pwrb_context
->wrb_handles_available
--;
827 if (pwrb_context
->alloc_index
==
828 (phba
->params
.wrbs_per_cxn
- 1))
829 pwrb_context
->alloc_index
= 0;
831 pwrb_context
->alloc_index
++;
832 pwrb_handle_tmp
= pwrb_context
->pwrb_handle_base
[
833 pwrb_context
->alloc_index
];
834 pwrb_handle
->nxt_wrb_index
= pwrb_handle_tmp
->wrb_index
;
841 * free_wrb_handle - To free the wrb handle back to pool
842 * @phba: The hba pointer
843 * @pwrb_context: The context to free from
844 * @pwrb_handle: The wrb_handle to free
846 * This happens under session_lock until submission to chip
849 free_wrb_handle(struct beiscsi_hba
*phba
, struct hwi_wrb_context
*pwrb_context
,
850 struct wrb_handle
*pwrb_handle
)
852 pwrb_context
->pwrb_handle_base
[pwrb_context
->free_index
] = pwrb_handle
;
853 pwrb_context
->wrb_handles_available
++;
854 if (pwrb_context
->free_index
== (phba
->params
.wrbs_per_cxn
- 1))
855 pwrb_context
->free_index
= 0;
857 pwrb_context
->free_index
++;
860 "FREE WRB: pwrb_handle=%p free_index=0x%x"
861 "wrb_handles_available=%d\n",
862 pwrb_handle
, pwrb_context
->free_index
,
863 pwrb_context
->wrb_handles_available
);
866 static struct sgl_handle
*alloc_mgmt_sgl_handle(struct beiscsi_hba
*phba
)
868 struct sgl_handle
*psgl_handle
;
870 if (phba
->eh_sgl_hndl_avbl
) {
871 psgl_handle
= phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
];
872 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
] = NULL
;
873 SE_DEBUG(DBG_LVL_8
, "mgmt_sgl_alloc_index=%d=0x%x\n",
874 phba
->eh_sgl_alloc_index
, phba
->eh_sgl_alloc_index
);
875 phba
->eh_sgl_hndl_avbl
--;
876 if (phba
->eh_sgl_alloc_index
==
877 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
-
879 phba
->eh_sgl_alloc_index
= 0;
881 phba
->eh_sgl_alloc_index
++;
888 free_mgmt_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
891 SE_DEBUG(DBG_LVL_8
, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
892 phba
->eh_sgl_free_index
);
893 if (phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
]) {
895 * this can happen if clean_task is called on a task that
896 * failed in xmit_task or alloc_pdu.
899 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
900 phba
->eh_sgl_free_index
);
903 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
] = psgl_handle
;
904 phba
->eh_sgl_hndl_avbl
++;
905 if (phba
->eh_sgl_free_index
==
906 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
- 1))
907 phba
->eh_sgl_free_index
= 0;
909 phba
->eh_sgl_free_index
++;
913 be_complete_io(struct beiscsi_conn
*beiscsi_conn
,
914 struct iscsi_task
*task
, struct sol_cqe
*psol
)
916 struct beiscsi_io_task
*io_task
= task
->dd_data
;
917 struct be_status_bhs
*sts_bhs
=
918 (struct be_status_bhs
*)io_task
->cmd_bhs
;
919 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
920 unsigned int sense_len
;
921 unsigned char *sense
;
922 u32 resid
= 0, exp_cmdsn
, max_cmdsn
;
923 u8 rsp
, status
, flags
;
926 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
927 & SOL_EXP_CMD_SN_MASK
);
929 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
930 & SOL_EXP_CMD_SN_MASK
) +
931 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
932 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
933 rsp
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) / 32]
934 & SOL_RESP_MASK
) >> 16);
935 status
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_sts
) / 32]
936 & SOL_STS_MASK
) >> 8);
937 flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
938 & SOL_FLAGS_MASK
) >> 24) | 0x80;
940 task
->sc
->result
= (DID_OK
<< 16) | status
;
941 if (rsp
!= ISCSI_STATUS_CMD_COMPLETED
) {
942 task
->sc
->result
= DID_ERROR
<< 16;
946 /* bidi not initially supported */
947 if (flags
& (ISCSI_FLAG_CMD_UNDERFLOW
| ISCSI_FLAG_CMD_OVERFLOW
)) {
948 resid
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) /
949 32] & SOL_RES_CNT_MASK
);
951 if (!status
&& (flags
& ISCSI_FLAG_CMD_OVERFLOW
))
952 task
->sc
->result
= DID_ERROR
<< 16;
954 if (flags
& ISCSI_FLAG_CMD_UNDERFLOW
) {
955 scsi_set_resid(task
->sc
, resid
);
956 if (!status
&& (scsi_bufflen(task
->sc
) - resid
<
957 task
->sc
->underflow
))
958 task
->sc
->result
= DID_ERROR
<< 16;
962 if (status
== SAM_STAT_CHECK_CONDITION
) {
963 unsigned short *slen
= (unsigned short *)sts_bhs
->sense_info
;
964 sense
= sts_bhs
->sense_info
+ sizeof(unsigned short);
965 sense_len
= cpu_to_be16(*slen
);
966 memcpy(task
->sc
->sense_buffer
, sense
,
967 min_t(u16
, sense_len
, SCSI_SENSE_BUFFERSIZE
));
970 if (io_task
->cmd_bhs
->iscsi_hdr
.flags
& ISCSI_FLAG_CMD_READ
) {
971 if (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
973 conn
->rxdata_octets
+= (psol
->
974 dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
978 scsi_dma_unmap(io_task
->scsi_cmnd
);
979 iscsi_complete_scsi_task(task
, exp_cmdsn
, max_cmdsn
);
983 be_complete_logout(struct beiscsi_conn
*beiscsi_conn
,
984 struct iscsi_task
*task
, struct sol_cqe
*psol
)
986 struct iscsi_logout_rsp
*hdr
;
987 struct beiscsi_io_task
*io_task
= task
->dd_data
;
988 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
990 hdr
= (struct iscsi_logout_rsp
*)task
->hdr
;
991 hdr
->opcode
= ISCSI_OP_LOGOUT_RSP
;
994 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
995 & SOL_FLAGS_MASK
) >> 24) | 0x80;
996 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
997 32] & SOL_RESP_MASK
);
998 hdr
->exp_cmdsn
= cpu_to_be32(psol
->
999 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1000 & SOL_EXP_CMD_SN_MASK
);
1001 hdr
->max_cmdsn
= be32_to_cpu((psol
->
1002 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1003 & SOL_EXP_CMD_SN_MASK
) +
1004 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1005 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1006 hdr
->dlength
[0] = 0;
1007 hdr
->dlength
[1] = 0;
1008 hdr
->dlength
[2] = 0;
1010 hdr
->itt
= io_task
->libiscsi_itt
;
1011 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1015 be_complete_tmf(struct beiscsi_conn
*beiscsi_conn
,
1016 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1018 struct iscsi_tm_rsp
*hdr
;
1019 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1020 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1022 hdr
= (struct iscsi_tm_rsp
*)task
->hdr
;
1023 hdr
->opcode
= ISCSI_OP_SCSI_TMFUNC_RSP
;
1024 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1025 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1026 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
1027 32] & SOL_RESP_MASK
);
1028 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
1029 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
1030 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
1031 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
1032 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1033 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1034 hdr
->itt
= io_task
->libiscsi_itt
;
1035 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1039 hwi_complete_drvr_msgs(struct beiscsi_conn
*beiscsi_conn
,
1040 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
1042 struct hwi_wrb_context
*pwrb_context
;
1043 struct wrb_handle
*pwrb_handle
= NULL
;
1044 struct hwi_controller
*phwi_ctrlr
;
1045 struct iscsi_task
*task
;
1046 struct beiscsi_io_task
*io_task
;
1047 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1048 struct iscsi_session
*session
= conn
->session
;
1050 phwi_ctrlr
= phba
->phwi_ctrlr
;
1051 pwrb_context
= &phwi_ctrlr
->wrb_context
[((psol
->
1052 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
1053 SOL_CID_MASK
) >> 6) -
1054 phba
->fw_config
.iscsi_cid_start
];
1055 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
1056 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
1057 32] & SOL_WRB_INDEX_MASK
) >> 16)];
1058 task
= pwrb_handle
->pio_handle
;
1060 io_task
= task
->dd_data
;
1061 spin_lock(&phba
->mgmt_sgl_lock
);
1062 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
1063 spin_unlock(&phba
->mgmt_sgl_lock
);
1064 spin_lock_bh(&session
->lock
);
1065 free_wrb_handle(phba
, pwrb_context
, pwrb_handle
);
1066 spin_unlock_bh(&session
->lock
);
1070 be_complete_nopin_resp(struct beiscsi_conn
*beiscsi_conn
,
1071 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1073 struct iscsi_nopin
*hdr
;
1074 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1075 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1077 hdr
= (struct iscsi_nopin
*)task
->hdr
;
1078 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1079 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1080 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
1081 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
1082 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
1083 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
1084 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1085 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1086 hdr
->opcode
= ISCSI_OP_NOOP_IN
;
1087 hdr
->itt
= io_task
->libiscsi_itt
;
1088 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1091 static void hwi_complete_cmd(struct beiscsi_conn
*beiscsi_conn
,
1092 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
1094 struct hwi_wrb_context
*pwrb_context
;
1095 struct wrb_handle
*pwrb_handle
;
1096 struct iscsi_wrb
*pwrb
= NULL
;
1097 struct hwi_controller
*phwi_ctrlr
;
1098 struct iscsi_task
*task
;
1100 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1101 struct iscsi_session
*session
= conn
->session
;
1103 phwi_ctrlr
= phba
->phwi_ctrlr
;
1104 pwrb_context
= &phwi_ctrlr
->wrb_context
[((psol
->dw
[offsetof
1105 (struct amap_sol_cqe
, cid
) / 32]
1106 & SOL_CID_MASK
) >> 6) -
1107 phba
->fw_config
.iscsi_cid_start
];
1108 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
1109 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
1110 32] & SOL_WRB_INDEX_MASK
) >> 16)];
1111 task
= pwrb_handle
->pio_handle
;
1112 pwrb
= pwrb_handle
->pwrb
;
1113 type
= (pwrb
->dw
[offsetof(struct amap_iscsi_wrb
, type
) / 32] &
1114 WRB_TYPE_MASK
) >> 28;
1116 spin_lock_bh(&session
->lock
);
1119 case HWH_TYPE_IO_RD
:
1120 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) ==
1122 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
1124 be_complete_io(beiscsi_conn
, task
, psol
);
1127 case HWH_TYPE_LOGOUT
:
1128 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGOUT
)
1129 be_complete_logout(beiscsi_conn
, task
, psol
);
1131 be_complete_tmf(beiscsi_conn
, task
, psol
);
1135 case HWH_TYPE_LOGIN
:
1137 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1138 "- Solicited path\n");
1142 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
1146 shost_printk(KERN_WARNING
, phba
->shost
,
1147 "In hwi_complete_cmd, unknown type = %d"
1148 "wrb_index 0x%x CID 0x%x\n", type
,
1149 ((psol
->dw
[offsetof(struct amap_iscsi_wrb
,
1150 type
) / 32] & SOL_WRB_INDEX_MASK
) >> 16),
1151 ((psol
->dw
[offsetof(struct amap_sol_cqe
,
1152 cid
) / 32] & SOL_CID_MASK
) >> 6));
1156 spin_unlock_bh(&session
->lock
);
1159 static struct list_head
*hwi_get_async_busy_list(struct hwi_async_pdu_context
1160 *pasync_ctx
, unsigned int is_header
,
1161 unsigned int host_write_ptr
)
1164 return &pasync_ctx
->async_entry
[host_write_ptr
].
1167 return &pasync_ctx
->async_entry
[host_write_ptr
].data_busy_list
;
1170 static struct async_pdu_handle
*
1171 hwi_get_async_handle(struct beiscsi_hba
*phba
,
1172 struct beiscsi_conn
*beiscsi_conn
,
1173 struct hwi_async_pdu_context
*pasync_ctx
,
1174 struct i_t_dpdu_cqe
*pdpdu_cqe
, unsigned int *pcq_index
)
1176 struct be_bus_address phys_addr
;
1177 struct list_head
*pbusy_list
;
1178 struct async_pdu_handle
*pasync_handle
= NULL
;
1180 unsigned char buffer_index
= -1;
1181 unsigned char is_header
= 0;
1183 phys_addr
.u
.a32
.address_lo
=
1184 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_lo
) / 32] -
1185 ((pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
1186 & PDUCQE_DPL_MASK
) >> 16);
1187 phys_addr
.u
.a32
.address_hi
=
1188 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_hi
) / 32];
1190 phys_addr
.u
.a64
.address
=
1191 *((unsigned long long *)(&phys_addr
.u
.a64
.address
));
1193 switch (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, code
) / 32]
1194 & PDUCQE_CODE_MASK
) {
1195 case UNSOL_HDR_NOTIFY
:
1198 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 1,
1199 (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1200 index
) / 32] & PDUCQE_INDEX_MASK
));
1202 buffer_len
= (unsigned int)(phys_addr
.u
.a64
.address
-
1203 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
);
1205 buffer_index
= buffer_len
/
1206 pasync_ctx
->async_header
.buffer_size
;
1209 case UNSOL_DATA_NOTIFY
:
1210 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 0, (pdpdu_cqe
->
1211 dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1212 index
) / 32] & PDUCQE_INDEX_MASK
));
1213 buffer_len
= (unsigned long)(phys_addr
.u
.a64
.address
-
1214 pasync_ctx
->async_data
.pa_base
.u
.
1216 buffer_index
= buffer_len
/ pasync_ctx
->async_data
.buffer_size
;
1220 shost_printk(KERN_WARNING
, phba
->shost
,
1221 "Unexpected code=%d\n",
1222 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1223 code
) / 32] & PDUCQE_CODE_MASK
);
1227 WARN_ON(!(buffer_index
<= pasync_ctx
->async_data
.num_entries
));
1228 WARN_ON(list_empty(pbusy_list
));
1229 list_for_each_entry(pasync_handle
, pbusy_list
, link
) {
1230 WARN_ON(pasync_handle
->consumed
);
1231 if (pasync_handle
->index
== buffer_index
)
1235 WARN_ON(!pasync_handle
);
1237 pasync_handle
->cri
= (unsigned short)beiscsi_conn
->beiscsi_conn_cid
-
1238 phba
->fw_config
.iscsi_cid_start
;
1239 pasync_handle
->is_header
= is_header
;
1240 pasync_handle
->buffer_len
= ((pdpdu_cqe
->
1241 dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
1242 & PDUCQE_DPL_MASK
) >> 16);
1244 *pcq_index
= (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1245 index
) / 32] & PDUCQE_INDEX_MASK
);
1246 return pasync_handle
;
1250 hwi_update_async_writables(struct hwi_async_pdu_context
*pasync_ctx
,
1251 unsigned int is_header
, unsigned int cq_index
)
1253 struct list_head
*pbusy_list
;
1254 struct async_pdu_handle
*pasync_handle
;
1255 unsigned int num_entries
, writables
= 0;
1256 unsigned int *pep_read_ptr
, *pwritables
;
1260 pep_read_ptr
= &pasync_ctx
->async_header
.ep_read_ptr
;
1261 pwritables
= &pasync_ctx
->async_header
.writables
;
1262 num_entries
= pasync_ctx
->async_header
.num_entries
;
1264 pep_read_ptr
= &pasync_ctx
->async_data
.ep_read_ptr
;
1265 pwritables
= &pasync_ctx
->async_data
.writables
;
1266 num_entries
= pasync_ctx
->async_data
.num_entries
;
1269 while ((*pep_read_ptr
) != cq_index
) {
1271 *pep_read_ptr
= (*pep_read_ptr
) % num_entries
;
1273 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, is_header
,
1276 WARN_ON(list_empty(pbusy_list
));
1278 if (!list_empty(pbusy_list
)) {
1279 pasync_handle
= list_entry(pbusy_list
->next
,
1280 struct async_pdu_handle
,
1282 WARN_ON(!pasync_handle
);
1283 pasync_handle
->consumed
= 1;
1291 "Duplicate notification received - index 0x%x!!\n",
1296 *pwritables
= *pwritables
+ writables
;
1300 static unsigned int hwi_free_async_msg(struct beiscsi_hba
*phba
,
1303 struct hwi_controller
*phwi_ctrlr
;
1304 struct hwi_async_pdu_context
*pasync_ctx
;
1305 struct async_pdu_handle
*pasync_handle
, *tmp_handle
;
1306 struct list_head
*plist
;
1309 phwi_ctrlr
= phba
->phwi_ctrlr
;
1310 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1312 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
1314 list_for_each_entry_safe(pasync_handle
, tmp_handle
, plist
, link
) {
1315 list_del(&pasync_handle
->link
);
1318 list_add_tail(&pasync_handle
->link
,
1319 &pasync_ctx
->async_header
.free_list
);
1320 pasync_ctx
->async_header
.free_entries
++;
1323 list_add_tail(&pasync_handle
->link
,
1324 &pasync_ctx
->async_data
.free_list
);
1325 pasync_ctx
->async_data
.free_entries
++;
1330 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
1331 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 0;
1332 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
1336 static struct phys_addr
*
1337 hwi_get_ring_address(struct hwi_async_pdu_context
*pasync_ctx
,
1338 unsigned int is_header
, unsigned int host_write_ptr
)
1340 struct phys_addr
*pasync_sge
= NULL
;
1343 pasync_sge
= pasync_ctx
->async_header
.ring_base
;
1345 pasync_sge
= pasync_ctx
->async_data
.ring_base
;
1347 return pasync_sge
+ host_write_ptr
;
1350 static void hwi_post_async_buffers(struct beiscsi_hba
*phba
,
1351 unsigned int is_header
)
1353 struct hwi_controller
*phwi_ctrlr
;
1354 struct hwi_async_pdu_context
*pasync_ctx
;
1355 struct async_pdu_handle
*pasync_handle
;
1356 struct list_head
*pfree_link
, *pbusy_list
;
1357 struct phys_addr
*pasync_sge
;
1358 unsigned int ring_id
, num_entries
;
1359 unsigned int host_write_num
;
1360 unsigned int writables
;
1364 phwi_ctrlr
= phba
->phwi_ctrlr
;
1365 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1368 num_entries
= pasync_ctx
->async_header
.num_entries
;
1369 writables
= min(pasync_ctx
->async_header
.writables
,
1370 pasync_ctx
->async_header
.free_entries
);
1371 pfree_link
= pasync_ctx
->async_header
.free_list
.next
;
1372 host_write_num
= pasync_ctx
->async_header
.host_write_ptr
;
1373 ring_id
= phwi_ctrlr
->default_pdu_hdr
.id
;
1375 num_entries
= pasync_ctx
->async_data
.num_entries
;
1376 writables
= min(pasync_ctx
->async_data
.writables
,
1377 pasync_ctx
->async_data
.free_entries
);
1378 pfree_link
= pasync_ctx
->async_data
.free_list
.next
;
1379 host_write_num
= pasync_ctx
->async_data
.host_write_ptr
;
1380 ring_id
= phwi_ctrlr
->default_pdu_data
.id
;
1383 writables
= (writables
/ 8) * 8;
1385 for (i
= 0; i
< writables
; i
++) {
1387 hwi_get_async_busy_list(pasync_ctx
, is_header
,
1390 list_entry(pfree_link
, struct async_pdu_handle
,
1392 WARN_ON(!pasync_handle
);
1393 pasync_handle
->consumed
= 0;
1395 pfree_link
= pfree_link
->next
;
1397 pasync_sge
= hwi_get_ring_address(pasync_ctx
,
1398 is_header
, host_write_num
);
1400 pasync_sge
->hi
= pasync_handle
->pa
.u
.a32
.address_lo
;
1401 pasync_sge
->lo
= pasync_handle
->pa
.u
.a32
.address_hi
;
1403 list_move(&pasync_handle
->link
, pbusy_list
);
1406 host_write_num
= host_write_num
% num_entries
;
1410 pasync_ctx
->async_header
.host_write_ptr
=
1412 pasync_ctx
->async_header
.free_entries
-= writables
;
1413 pasync_ctx
->async_header
.writables
-= writables
;
1414 pasync_ctx
->async_header
.busy_entries
+= writables
;
1416 pasync_ctx
->async_data
.host_write_ptr
= host_write_num
;
1417 pasync_ctx
->async_data
.free_entries
-= writables
;
1418 pasync_ctx
->async_data
.writables
-= writables
;
1419 pasync_ctx
->async_data
.busy_entries
+= writables
;
1422 doorbell
|= ring_id
& DB_DEF_PDU_RING_ID_MASK
;
1423 doorbell
|= 1 << DB_DEF_PDU_REARM_SHIFT
;
1424 doorbell
|= 0 << DB_DEF_PDU_EVENT_SHIFT
;
1425 doorbell
|= (writables
& DB_DEF_PDU_CQPROC_MASK
)
1426 << DB_DEF_PDU_CQPROC_SHIFT
;
1428 iowrite32(doorbell
, phba
->db_va
+ DB_RXULP0_OFFSET
);
1432 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba
*phba
,
1433 struct beiscsi_conn
*beiscsi_conn
,
1434 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1436 struct hwi_controller
*phwi_ctrlr
;
1437 struct hwi_async_pdu_context
*pasync_ctx
;
1438 struct async_pdu_handle
*pasync_handle
= NULL
;
1439 unsigned int cq_index
= -1;
1441 phwi_ctrlr
= phba
->phwi_ctrlr
;
1442 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1444 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1445 pdpdu_cqe
, &cq_index
);
1446 BUG_ON(pasync_handle
->is_header
!= 0);
1447 if (pasync_handle
->consumed
== 0)
1448 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1451 hwi_free_async_msg(phba
, pasync_handle
->cri
);
1452 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1456 hwi_fwd_async_msg(struct beiscsi_conn
*beiscsi_conn
,
1457 struct beiscsi_hba
*phba
,
1458 struct hwi_async_pdu_context
*pasync_ctx
, unsigned short cri
)
1460 struct list_head
*plist
;
1461 struct async_pdu_handle
*pasync_handle
;
1463 unsigned int hdr_len
= 0, buf_len
= 0;
1464 unsigned int status
, index
= 0, offset
= 0;
1465 void *pfirst_buffer
= NULL
;
1466 unsigned int num_buf
= 0;
1468 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
1470 list_for_each_entry(pasync_handle
, plist
, link
) {
1472 phdr
= pasync_handle
->pbuffer
;
1473 hdr_len
= pasync_handle
->buffer_len
;
1475 buf_len
= pasync_handle
->buffer_len
;
1477 pfirst_buffer
= pasync_handle
->pbuffer
;
1480 memcpy(pfirst_buffer
+ offset
,
1481 pasync_handle
->pbuffer
, buf_len
);
1487 status
= beiscsi_process_async_pdu(beiscsi_conn
, phba
,
1488 (beiscsi_conn
->beiscsi_conn_cid
-
1489 phba
->fw_config
.iscsi_cid_start
),
1490 phdr
, hdr_len
, pfirst_buffer
,
1494 hwi_free_async_msg(phba
, cri
);
1499 hwi_gather_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
1500 struct beiscsi_hba
*phba
,
1501 struct async_pdu_handle
*pasync_handle
)
1503 struct hwi_async_pdu_context
*pasync_ctx
;
1504 struct hwi_controller
*phwi_ctrlr
;
1505 unsigned int bytes_needed
= 0, status
= 0;
1506 unsigned short cri
= pasync_handle
->cri
;
1507 struct pdu_base
*ppdu
;
1509 phwi_ctrlr
= phba
->phwi_ctrlr
;
1510 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1512 list_del(&pasync_handle
->link
);
1513 if (pasync_handle
->is_header
) {
1514 pasync_ctx
->async_header
.busy_entries
--;
1515 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1516 hwi_free_async_msg(phba
, cri
);
1520 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
1521 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 1;
1522 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_len
=
1523 (unsigned short)pasync_handle
->buffer_len
;
1524 list_add_tail(&pasync_handle
->link
,
1525 &pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
1527 ppdu
= pasync_handle
->pbuffer
;
1528 bytes_needed
= ((((ppdu
->dw
[offsetof(struct amap_pdu_base
,
1529 data_len_hi
) / 32] & PDUBASE_DATALENHI_MASK
) << 8) &
1530 0xFFFF0000) | ((be16_to_cpu((ppdu
->
1531 dw
[offsetof(struct amap_pdu_base
, data_len_lo
) / 32]
1532 & PDUBASE_DATALENLO_MASK
) >> 16)) & 0x0000FFFF));
1535 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_needed
=
1538 if (bytes_needed
== 0)
1539 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1543 pasync_ctx
->async_data
.busy_entries
--;
1544 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1545 list_add_tail(&pasync_handle
->link
,
1546 &pasync_ctx
->async_entry
[cri
].wait_queue
.
1548 pasync_ctx
->async_entry
[cri
].wait_queue
.
1550 (unsigned short)pasync_handle
->buffer_len
;
1552 if (pasync_ctx
->async_entry
[cri
].wait_queue
.
1554 pasync_ctx
->async_entry
[cri
].wait_queue
.
1556 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1563 static void hwi_process_default_pdu_ring(struct beiscsi_conn
*beiscsi_conn
,
1564 struct beiscsi_hba
*phba
,
1565 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1567 struct hwi_controller
*phwi_ctrlr
;
1568 struct hwi_async_pdu_context
*pasync_ctx
;
1569 struct async_pdu_handle
*pasync_handle
= NULL
;
1570 unsigned int cq_index
= -1;
1572 phwi_ctrlr
= phba
->phwi_ctrlr
;
1573 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1574 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1575 pdpdu_cqe
, &cq_index
);
1577 if (pasync_handle
->consumed
== 0)
1578 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1580 hwi_gather_async_pdu(beiscsi_conn
, phba
, pasync_handle
);
1581 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1584 static void beiscsi_process_mcc_isr(struct beiscsi_hba
*phba
)
1586 struct be_queue_info
*mcc_cq
;
1587 struct be_mcc_compl
*mcc_compl
;
1588 unsigned int num_processed
= 0;
1590 mcc_cq
= &phba
->ctrl
.mcc_obj
.cq
;
1591 mcc_compl
= queue_tail_node(mcc_cq
);
1592 mcc_compl
->flags
= le32_to_cpu(mcc_compl
->flags
);
1593 while (mcc_compl
->flags
& CQE_FLAGS_VALID_MASK
) {
1595 if (num_processed
>= 32) {
1596 hwi_ring_cq_db(phba
, mcc_cq
->id
,
1597 num_processed
, 0, 0);
1600 if (mcc_compl
->flags
& CQE_FLAGS_ASYNC_MASK
) {
1601 /* Interpret flags as an async trailer */
1602 if (is_link_state_evt(mcc_compl
->flags
))
1603 /* Interpret compl as a async link evt */
1604 beiscsi_async_link_state_process(phba
,
1605 (struct be_async_event_link_state
*) mcc_compl
);
1608 " Unsupported Async Event, flags"
1609 " = 0x%08x\n", mcc_compl
->flags
);
1610 } else if (mcc_compl
->flags
& CQE_FLAGS_COMPLETED_MASK
) {
1611 be_mcc_compl_process_isr(&phba
->ctrl
, mcc_compl
);
1612 atomic_dec(&phba
->ctrl
.mcc_obj
.q
.used
);
1615 mcc_compl
->flags
= 0;
1616 queue_tail_inc(mcc_cq
);
1617 mcc_compl
= queue_tail_node(mcc_cq
);
1618 mcc_compl
->flags
= le32_to_cpu(mcc_compl
->flags
);
1622 if (num_processed
> 0)
1623 hwi_ring_cq_db(phba
, mcc_cq
->id
, num_processed
, 1, 0);
1627 static unsigned int beiscsi_process_cq(struct be_eq_obj
*pbe_eq
)
1629 struct be_queue_info
*cq
;
1630 struct sol_cqe
*sol
;
1631 struct dmsg_cqe
*dmsg
;
1632 unsigned int num_processed
= 0;
1633 unsigned int tot_nump
= 0;
1634 struct beiscsi_conn
*beiscsi_conn
;
1635 struct beiscsi_endpoint
*beiscsi_ep
;
1636 struct iscsi_endpoint
*ep
;
1637 struct beiscsi_hba
*phba
;
1640 sol
= queue_tail_node(cq
);
1641 phba
= pbe_eq
->phba
;
1643 while (sol
->dw
[offsetof(struct amap_sol_cqe
, valid
) / 32] &
1645 be_dws_le_to_cpu(sol
, sizeof(struct sol_cqe
));
1647 ep
= phba
->ep_array
[(u32
) ((sol
->
1648 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
1649 SOL_CID_MASK
) >> 6) -
1650 phba
->fw_config
.iscsi_cid_start
];
1652 beiscsi_ep
= ep
->dd_data
;
1653 beiscsi_conn
= beiscsi_ep
->conn
;
1655 if (num_processed
>= 32) {
1656 hwi_ring_cq_db(phba
, cq
->id
,
1657 num_processed
, 0, 0);
1658 tot_nump
+= num_processed
;
1662 switch ((u32
) sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1663 32] & CQE_CODE_MASK
) {
1664 case SOL_CMD_COMPLETE
:
1665 hwi_complete_cmd(beiscsi_conn
, phba
, sol
);
1667 case DRIVERMSG_NOTIFY
:
1668 SE_DEBUG(DBG_LVL_8
, "Received DRIVERMSG_NOTIFY\n");
1669 dmsg
= (struct dmsg_cqe
*)sol
;
1670 hwi_complete_drvr_msgs(beiscsi_conn
, phba
, sol
);
1672 case UNSOL_HDR_NOTIFY
:
1673 SE_DEBUG(DBG_LVL_8
, "Received UNSOL_HDR_ NOTIFY\n");
1674 hwi_process_default_pdu_ring(beiscsi_conn
, phba
,
1675 (struct i_t_dpdu_cqe
*)sol
);
1677 case UNSOL_DATA_NOTIFY
:
1678 SE_DEBUG(DBG_LVL_8
, "Received UNSOL_DATA_NOTIFY\n");
1679 hwi_process_default_pdu_ring(beiscsi_conn
, phba
,
1680 (struct i_t_dpdu_cqe
*)sol
);
1682 case CXN_INVALIDATE_INDEX_NOTIFY
:
1683 case CMD_INVALIDATED_NOTIFY
:
1684 case CXN_INVALIDATE_NOTIFY
:
1686 "Ignoring CQ Error notification for cmd/cxn"
1689 case SOL_CMD_KILLED_DATA_DIGEST_ERR
:
1690 case CMD_KILLED_INVALID_STATSN_RCVD
:
1691 case CMD_KILLED_INVALID_R2T_RCVD
:
1692 case CMD_CXN_KILLED_LUN_INVALID
:
1693 case CMD_CXN_KILLED_ICD_INVALID
:
1694 case CMD_CXN_KILLED_ITT_INVALID
:
1695 case CMD_CXN_KILLED_SEQ_OUTOFORDER
:
1696 case CMD_CXN_KILLED_INVALID_DATASN_RCVD
:
1698 "CQ Error notification for cmd.. "
1699 "code %d cid 0x%x\n",
1700 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1701 32] & CQE_CODE_MASK
,
1702 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1703 32] & SOL_CID_MASK
));
1705 case UNSOL_DATA_DIGEST_ERROR_NOTIFY
:
1707 "Digest error on def pdu ring, dropping..\n");
1708 hwi_flush_default_pdu_buffer(phba
, beiscsi_conn
,
1709 (struct i_t_dpdu_cqe
*) sol
);
1711 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL
:
1712 case CXN_KILLED_BURST_LEN_MISMATCH
:
1713 case CXN_KILLED_AHS_RCVD
:
1714 case CXN_KILLED_HDR_DIGEST_ERR
:
1715 case CXN_KILLED_UNKNOWN_HDR
:
1716 case CXN_KILLED_STALE_ITT_TTT_RCVD
:
1717 case CXN_KILLED_INVALID_ITT_TTT_RCVD
:
1718 case CXN_KILLED_TIMED_OUT
:
1719 case CXN_KILLED_FIN_RCVD
:
1720 case CXN_KILLED_BAD_UNSOL_PDU_RCVD
:
1721 case CXN_KILLED_BAD_WRB_INDEX_ERROR
:
1722 case CXN_KILLED_OVER_RUN_RESIDUAL
:
1723 case CXN_KILLED_UNDER_RUN_RESIDUAL
:
1724 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN
:
1725 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, reset CID "
1727 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1728 32] & CQE_CODE_MASK
,
1729 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1730 32] & CQE_CID_MASK
));
1731 iscsi_conn_failure(beiscsi_conn
->conn
,
1732 ISCSI_ERR_CONN_FAILED
);
1734 case CXN_KILLED_RST_SENT
:
1735 case CXN_KILLED_RST_RCVD
:
1736 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, reset"
1737 "received/sent on CID 0x%x...\n",
1738 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1739 32] & CQE_CODE_MASK
,
1740 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1741 32] & CQE_CID_MASK
));
1742 iscsi_conn_failure(beiscsi_conn
->conn
,
1743 ISCSI_ERR_CONN_FAILED
);
1746 SE_DEBUG(DBG_LVL_1
, "CQ Error Invalid code= %d "
1747 "received on CID 0x%x...\n",
1748 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1749 32] & CQE_CODE_MASK
,
1750 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1751 32] & CQE_CID_MASK
));
1755 AMAP_SET_BITS(struct amap_sol_cqe
, valid
, sol
, 0);
1757 sol
= queue_tail_node(cq
);
1761 if (num_processed
> 0) {
1762 tot_nump
+= num_processed
;
1763 hwi_ring_cq_db(phba
, cq
->id
, num_processed
, 1, 0);
1768 void beiscsi_process_all_cqs(struct work_struct
*work
)
1770 unsigned long flags
;
1771 struct hwi_controller
*phwi_ctrlr
;
1772 struct hwi_context_memory
*phwi_context
;
1773 struct be_eq_obj
*pbe_eq
;
1774 struct beiscsi_hba
*phba
=
1775 container_of(work
, struct beiscsi_hba
, work_cqs
);
1777 phwi_ctrlr
= phba
->phwi_ctrlr
;
1778 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
1779 if (phba
->msix_enabled
)
1780 pbe_eq
= &phwi_context
->be_eq
[phba
->num_cpus
];
1782 pbe_eq
= &phwi_context
->be_eq
[0];
1784 if (phba
->todo_mcc_cq
) {
1785 spin_lock_irqsave(&phba
->isr_lock
, flags
);
1786 phba
->todo_mcc_cq
= 0;
1787 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
1788 beiscsi_process_mcc_isr(phba
);
1791 if (phba
->todo_cq
) {
1792 spin_lock_irqsave(&phba
->isr_lock
, flags
);
1794 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
1795 beiscsi_process_cq(pbe_eq
);
1799 static int be_iopoll(struct blk_iopoll
*iop
, int budget
)
1801 static unsigned int ret
;
1802 struct beiscsi_hba
*phba
;
1803 struct be_eq_obj
*pbe_eq
;
1805 pbe_eq
= container_of(iop
, struct be_eq_obj
, iopoll
);
1806 ret
= beiscsi_process_cq(pbe_eq
);
1808 phba
= pbe_eq
->phba
;
1809 blk_iopoll_complete(iop
);
1810 SE_DEBUG(DBG_LVL_8
, "rearm pbe_eq->q.id =%d\n", pbe_eq
->q
.id
);
1811 hwi_ring_eq_db(phba
, pbe_eq
->q
.id
, 0, 0, 1, 1);
1817 hwi_write_sgl(struct iscsi_wrb
*pwrb
, struct scatterlist
*sg
,
1818 unsigned int num_sg
, struct beiscsi_io_task
*io_task
)
1820 struct iscsi_sge
*psgl
;
1821 unsigned short sg_len
, index
;
1822 unsigned int sge_len
= 0;
1823 unsigned long long addr
;
1824 struct scatterlist
*l_sg
;
1825 unsigned int offset
;
1827 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
1828 io_task
->bhs_pa
.u
.a32
.address_lo
);
1829 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
1830 io_task
->bhs_pa
.u
.a32
.address_hi
);
1833 for (index
= 0; (index
< num_sg
) && (index
< 2); index
++,
1836 sg_len
= sg_dma_len(sg
);
1837 addr
= (u64
) sg_dma_address(sg
);
1838 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
1839 ((u32
)(addr
& 0xFFFFFFFF)));
1840 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
1841 ((u32
)(addr
>> 32)));
1842 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
1846 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_r2t_offset
,
1848 sg_len
= sg_dma_len(sg
);
1849 addr
= (u64
) sg_dma_address(sg
);
1850 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_lo
, pwrb
,
1851 ((u32
)(addr
& 0xFFFFFFFF)));
1852 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_hi
, pwrb
,
1853 ((u32
)(addr
>> 32)));
1854 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_len
, pwrb
,
1858 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
1859 memset(psgl
, 0, sizeof(*psgl
) * BE2_SGE
);
1861 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
- 2);
1863 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1864 io_task
->bhs_pa
.u
.a32
.address_hi
);
1865 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1866 io_task
->bhs_pa
.u
.a32
.address_lo
);
1869 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
1871 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
1873 } else if (num_sg
== 2) {
1874 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
1876 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
1879 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
1881 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
1888 for (index
= 0; index
< num_sg
; index
++, sg
= sg_next(sg
), psgl
++) {
1889 sg_len
= sg_dma_len(sg
);
1890 addr
= (u64
) sg_dma_address(sg
);
1891 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1892 (addr
& 0xFFFFFFFF));
1893 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1895 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, sg_len
);
1896 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, offset
);
1897 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
1901 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
1904 static void hwi_write_buffer(struct iscsi_wrb
*pwrb
, struct iscsi_task
*task
)
1906 struct iscsi_sge
*psgl
;
1907 unsigned long long addr
;
1908 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1909 struct beiscsi_conn
*beiscsi_conn
= io_task
->conn
;
1910 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
1912 io_task
->bhs_len
= sizeof(struct be_nonio_bhs
) - 2;
1913 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
1914 io_task
->bhs_pa
.u
.a32
.address_lo
);
1915 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
1916 io_task
->bhs_pa
.u
.a32
.address_hi
);
1919 if (task
->data_count
) {
1920 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 1);
1921 addr
= (u64
) pci_map_single(phba
->pcidev
,
1923 task
->data_count
, 1);
1925 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
1928 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
1929 ((u32
)(addr
& 0xFFFFFFFF)));
1930 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
1931 ((u32
)(addr
>> 32)));
1932 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
1935 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
, 1);
1937 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
1941 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
1943 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
);
1945 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1946 io_task
->bhs_pa
.u
.a32
.address_hi
);
1947 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1948 io_task
->bhs_pa
.u
.a32
.address_lo
);
1951 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
, 0);
1952 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
, 0);
1953 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0);
1954 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, 0);
1955 AMAP_SET_BITS(struct amap_iscsi_sge
, rsvd0
, psgl
, 0);
1956 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
1960 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1961 ((u32
)(addr
& 0xFFFFFFFF)));
1962 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1963 ((u32
)(addr
>> 32)));
1965 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0x106);
1967 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
1970 static void beiscsi_find_mem_req(struct beiscsi_hba
*phba
)
1972 unsigned int num_cq_pages
, num_async_pdu_buf_pages
;
1973 unsigned int num_async_pdu_data_pages
, wrb_sz_per_cxn
;
1974 unsigned int num_async_pdu_buf_sgl_pages
, num_async_pdu_data_sgl_pages
;
1976 num_cq_pages
= PAGES_REQUIRED(phba
->params
.num_cq_entries
* \
1977 sizeof(struct sol_cqe
));
1978 num_async_pdu_buf_pages
=
1979 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1980 phba
->params
.defpdu_hdr_sz
);
1981 num_async_pdu_buf_sgl_pages
=
1982 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1983 sizeof(struct phys_addr
));
1984 num_async_pdu_data_pages
=
1985 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1986 phba
->params
.defpdu_data_sz
);
1987 num_async_pdu_data_sgl_pages
=
1988 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1989 sizeof(struct phys_addr
));
1991 phba
->params
.hwi_ws_sz
= sizeof(struct hwi_controller
);
1993 phba
->mem_req
[ISCSI_MEM_GLOBAL_HEADER
] = 2 *
1994 BE_ISCSI_PDU_HEADER_SIZE
;
1995 phba
->mem_req
[HWI_MEM_ADDN_CONTEXT
] =
1996 sizeof(struct hwi_context_memory
);
1999 phba
->mem_req
[HWI_MEM_WRB
] = sizeof(struct iscsi_wrb
)
2000 * (phba
->params
.wrbs_per_cxn
)
2001 * phba
->params
.cxns_per_ctrl
;
2002 wrb_sz_per_cxn
= sizeof(struct wrb_handle
) *
2003 (phba
->params
.wrbs_per_cxn
);
2004 phba
->mem_req
[HWI_MEM_WRBH
] = roundup_pow_of_two((wrb_sz_per_cxn
) *
2005 phba
->params
.cxns_per_ctrl
);
2007 phba
->mem_req
[HWI_MEM_SGLH
] = sizeof(struct sgl_handle
) *
2008 phba
->params
.icds_per_ctrl
;
2009 phba
->mem_req
[HWI_MEM_SGE
] = sizeof(struct iscsi_sge
) *
2010 phba
->params
.num_sge_per_io
* phba
->params
.icds_per_ctrl
;
2012 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_BUF
] =
2013 num_async_pdu_buf_pages
* PAGE_SIZE
;
2014 phba
->mem_req
[HWI_MEM_ASYNC_DATA_BUF
] =
2015 num_async_pdu_data_pages
* PAGE_SIZE
;
2016 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_RING
] =
2017 num_async_pdu_buf_sgl_pages
* PAGE_SIZE
;
2018 phba
->mem_req
[HWI_MEM_ASYNC_DATA_RING
] =
2019 num_async_pdu_data_sgl_pages
* PAGE_SIZE
;
2020 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_HANDLE
] =
2021 phba
->params
.asyncpdus_per_ctrl
*
2022 sizeof(struct async_pdu_handle
);
2023 phba
->mem_req
[HWI_MEM_ASYNC_DATA_HANDLE
] =
2024 phba
->params
.asyncpdus_per_ctrl
*
2025 sizeof(struct async_pdu_handle
);
2026 phba
->mem_req
[HWI_MEM_ASYNC_PDU_CONTEXT
] =
2027 sizeof(struct hwi_async_pdu_context
) +
2028 (phba
->params
.cxns_per_ctrl
* sizeof(struct hwi_async_entry
));
2031 static int beiscsi_alloc_mem(struct beiscsi_hba
*phba
)
2033 struct be_mem_descriptor
*mem_descr
;
2035 struct mem_array
*mem_arr
, *mem_arr_orig
;
2036 unsigned int i
, j
, alloc_size
, curr_alloc_size
;
2038 phba
->phwi_ctrlr
= kmalloc(phba
->params
.hwi_ws_sz
, GFP_KERNEL
);
2039 if (!phba
->phwi_ctrlr
)
2042 phba
->init_mem
= kcalloc(SE_MEM_MAX
, sizeof(*mem_descr
),
2044 if (!phba
->init_mem
) {
2045 kfree(phba
->phwi_ctrlr
);
2049 mem_arr_orig
= kmalloc(sizeof(*mem_arr_orig
) * BEISCSI_MAX_FRAGS_INIT
,
2051 if (!mem_arr_orig
) {
2052 kfree(phba
->init_mem
);
2053 kfree(phba
->phwi_ctrlr
);
2057 mem_descr
= phba
->init_mem
;
2058 for (i
= 0; i
< SE_MEM_MAX
; i
++) {
2060 mem_arr
= mem_arr_orig
;
2061 alloc_size
= phba
->mem_req
[i
];
2062 memset(mem_arr
, 0, sizeof(struct mem_array
) *
2063 BEISCSI_MAX_FRAGS_INIT
);
2064 curr_alloc_size
= min(be_max_phys_size
* 1024, alloc_size
);
2066 mem_arr
->virtual_address
= pci_alloc_consistent(
2070 if (!mem_arr
->virtual_address
) {
2071 if (curr_alloc_size
<= BE_MIN_MEM_SIZE
)
2073 if (curr_alloc_size
-
2074 rounddown_pow_of_two(curr_alloc_size
))
2075 curr_alloc_size
= rounddown_pow_of_two
2078 curr_alloc_size
= curr_alloc_size
/ 2;
2080 mem_arr
->bus_address
.u
.
2081 a64
.address
= (__u64
) bus_add
;
2082 mem_arr
->size
= curr_alloc_size
;
2083 alloc_size
-= curr_alloc_size
;
2084 curr_alloc_size
= min(be_max_phys_size
*
2089 } while (alloc_size
);
2090 mem_descr
->num_elements
= j
;
2091 mem_descr
->size_in_bytes
= phba
->mem_req
[i
];
2092 mem_descr
->mem_array
= kmalloc(sizeof(*mem_arr
) * j
,
2094 if (!mem_descr
->mem_array
)
2097 memcpy(mem_descr
->mem_array
, mem_arr_orig
,
2098 sizeof(struct mem_array
) * j
);
2101 kfree(mem_arr_orig
);
2104 mem_descr
->num_elements
= j
;
2105 while ((i
) || (j
)) {
2106 for (j
= mem_descr
->num_elements
; j
> 0; j
--) {
2107 pci_free_consistent(phba
->pcidev
,
2108 mem_descr
->mem_array
[j
- 1].size
,
2109 mem_descr
->mem_array
[j
- 1].
2111 (unsigned long)mem_descr
->
2113 bus_address
.u
.a64
.address
);
2117 kfree(mem_descr
->mem_array
);
2121 kfree(mem_arr_orig
);
2122 kfree(phba
->init_mem
);
2123 kfree(phba
->phwi_ctrlr
);
/* Size all memory regions, then allocate them.  Returns 0 or -ENOMEM. */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);
	return beiscsi_alloc_mem(phba);
}
2133 static void iscsi_init_global_templates(struct beiscsi_hba
*phba
)
2135 struct pdu_data_out
*pdata_out
;
2136 struct pdu_nop_out
*pnop_out
;
2137 struct be_mem_descriptor
*mem_descr
;
2139 mem_descr
= phba
->init_mem
;
2140 mem_descr
+= ISCSI_MEM_GLOBAL_HEADER
;
2142 (struct pdu_data_out
*)mem_descr
->mem_array
[0].virtual_address
;
2143 memset(pdata_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
2145 AMAP_SET_BITS(struct amap_pdu_data_out
, opcode
, pdata_out
,
2149 (struct pdu_nop_out
*)((unsigned char *)mem_descr
->mem_array
[0].
2150 virtual_address
+ BE_ISCSI_PDU_HEADER_SIZE
);
2152 memset(pnop_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
2153 AMAP_SET_BITS(struct amap_pdu_nop_out
, ttt
, pnop_out
, 0xFFFFFFFF);
2154 AMAP_SET_BITS(struct amap_pdu_nop_out
, f_bit
, pnop_out
, 1);
2155 AMAP_SET_BITS(struct amap_pdu_nop_out
, i_bit
, pnop_out
, 0);
2158 static void beiscsi_init_wrb_handle(struct beiscsi_hba
*phba
)
2160 struct be_mem_descriptor
*mem_descr_wrbh
, *mem_descr_wrb
;
2161 struct wrb_handle
*pwrb_handle
;
2162 struct hwi_controller
*phwi_ctrlr
;
2163 struct hwi_wrb_context
*pwrb_context
;
2164 struct iscsi_wrb
*pwrb
;
2165 unsigned int num_cxn_wrbh
;
2166 unsigned int num_cxn_wrb
, j
, idx
, index
;
2168 mem_descr_wrbh
= phba
->init_mem
;
2169 mem_descr_wrbh
+= HWI_MEM_WRBH
;
2171 mem_descr_wrb
= phba
->init_mem
;
2172 mem_descr_wrb
+= HWI_MEM_WRB
;
2175 pwrb_handle
= mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
2176 num_cxn_wrbh
= ((mem_descr_wrbh
->mem_array
[idx
].size
) /
2177 ((sizeof(struct wrb_handle
)) *
2178 phba
->params
.wrbs_per_cxn
));
2179 phwi_ctrlr
= phba
->phwi_ctrlr
;
2181 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
2182 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
2183 pwrb_context
->pwrb_handle_base
=
2184 kzalloc(sizeof(struct wrb_handle
*) *
2185 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
2186 pwrb_context
->pwrb_handle_basestd
=
2187 kzalloc(sizeof(struct wrb_handle
*) *
2188 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
2190 pwrb_context
->alloc_index
= 0;
2191 pwrb_context
->wrb_handles_available
= 0;
2192 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2193 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
2194 pwrb_context
->pwrb_handle_basestd
[j
] =
2196 pwrb_context
->wrb_handles_available
++;
2197 pwrb_handle
->wrb_index
= j
;
2200 pwrb_context
->free_index
= 0;
2205 mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
2207 ((mem_descr_wrbh
->mem_array
[idx
].size
) /
2208 ((sizeof(struct wrb_handle
)) *
2209 phba
->params
.wrbs_per_cxn
));
2210 pwrb_context
->alloc_index
= 0;
2211 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2212 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
2213 pwrb_context
->pwrb_handle_basestd
[j
] =
2215 pwrb_context
->wrb_handles_available
++;
2216 pwrb_handle
->wrb_index
= j
;
2219 pwrb_context
->free_index
= 0;
2224 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
2225 num_cxn_wrb
= (mem_descr_wrb
->mem_array
[idx
].size
) /
2226 ((sizeof(struct iscsi_wrb
) *
2227 phba
->params
.wrbs_per_cxn
));
2228 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
2229 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
2231 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2232 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
2233 pwrb_handle
->pwrb
= pwrb
;
2239 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
2240 num_cxn_wrb
= (mem_descr_wrb
->mem_array
[idx
].size
) /
2241 ((sizeof(struct iscsi_wrb
) *
2242 phba
->params
.wrbs_per_cxn
));
2243 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2244 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
2245 pwrb_handle
->pwrb
= pwrb
;
2253 static void hwi_init_async_pdu_ctx(struct beiscsi_hba
*phba
)
2255 struct hwi_controller
*phwi_ctrlr
;
2256 struct hba_parameters
*p
= &phba
->params
;
2257 struct hwi_async_pdu_context
*pasync_ctx
;
2258 struct async_pdu_handle
*pasync_header_h
, *pasync_data_h
;
2260 struct be_mem_descriptor
*mem_descr
;
2262 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2263 mem_descr
+= HWI_MEM_ASYNC_PDU_CONTEXT
;
2265 phwi_ctrlr
= phba
->phwi_ctrlr
;
2266 phwi_ctrlr
->phwi_ctxt
->pasync_ctx
= (struct hwi_async_pdu_context
*)
2267 mem_descr
->mem_array
[0].virtual_address
;
2268 pasync_ctx
= phwi_ctrlr
->phwi_ctxt
->pasync_ctx
;
2269 memset(pasync_ctx
, 0, sizeof(*pasync_ctx
));
2271 pasync_ctx
->async_header
.num_entries
= p
->asyncpdus_per_ctrl
;
2272 pasync_ctx
->async_header
.buffer_size
= p
->defpdu_hdr_sz
;
2273 pasync_ctx
->async_data
.buffer_size
= p
->defpdu_data_sz
;
2274 pasync_ctx
->async_data
.num_entries
= p
->asyncpdus_per_ctrl
;
2276 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2277 mem_descr
+= HWI_MEM_ASYNC_HEADER_BUF
;
2278 if (mem_descr
->mem_array
[0].virtual_address
) {
2280 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2281 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2283 shost_printk(KERN_WARNING
, phba
->shost
,
2284 "No Virtual address\n");
2286 pasync_ctx
->async_header
.va_base
=
2287 mem_descr
->mem_array
[0].virtual_address
;
2289 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
=
2290 mem_descr
->mem_array
[0].bus_address
.u
.a64
.address
;
2292 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2293 mem_descr
+= HWI_MEM_ASYNC_HEADER_RING
;
2294 if (mem_descr
->mem_array
[0].virtual_address
) {
2296 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2297 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2299 shost_printk(KERN_WARNING
, phba
->shost
,
2300 "No Virtual address\n");
2301 pasync_ctx
->async_header
.ring_base
=
2302 mem_descr
->mem_array
[0].virtual_address
;
2304 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2305 mem_descr
+= HWI_MEM_ASYNC_HEADER_HANDLE
;
2306 if (mem_descr
->mem_array
[0].virtual_address
) {
2308 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2309 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2311 shost_printk(KERN_WARNING
, phba
->shost
,
2312 "No Virtual address\n");
2314 pasync_ctx
->async_header
.handle_base
=
2315 mem_descr
->mem_array
[0].virtual_address
;
2316 pasync_ctx
->async_header
.writables
= 0;
2317 INIT_LIST_HEAD(&pasync_ctx
->async_header
.free_list
);
2319 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2320 mem_descr
+= HWI_MEM_ASYNC_DATA_BUF
;
2321 if (mem_descr
->mem_array
[0].virtual_address
) {
2323 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2324 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2326 shost_printk(KERN_WARNING
, phba
->shost
,
2327 "No Virtual address\n");
2328 pasync_ctx
->async_data
.va_base
=
2329 mem_descr
->mem_array
[0].virtual_address
;
2330 pasync_ctx
->async_data
.pa_base
.u
.a64
.address
=
2331 mem_descr
->mem_array
[0].bus_address
.u
.a64
.address
;
2333 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2334 mem_descr
+= HWI_MEM_ASYNC_DATA_RING
;
2335 if (mem_descr
->mem_array
[0].virtual_address
) {
2337 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2338 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2340 shost_printk(KERN_WARNING
, phba
->shost
,
2341 "No Virtual address\n");
2343 pasync_ctx
->async_data
.ring_base
=
2344 mem_descr
->mem_array
[0].virtual_address
;
2346 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2347 mem_descr
+= HWI_MEM_ASYNC_DATA_HANDLE
;
2348 if (!mem_descr
->mem_array
[0].virtual_address
)
2349 shost_printk(KERN_WARNING
, phba
->shost
,
2350 "No Virtual address\n");
2352 pasync_ctx
->async_data
.handle_base
=
2353 mem_descr
->mem_array
[0].virtual_address
;
2354 pasync_ctx
->async_data
.writables
= 0;
2355 INIT_LIST_HEAD(&pasync_ctx
->async_data
.free_list
);
2358 (struct async_pdu_handle
*)pasync_ctx
->async_header
.handle_base
;
2360 (struct async_pdu_handle
*)pasync_ctx
->async_data
.handle_base
;
2362 for (index
= 0; index
< p
->asyncpdus_per_ctrl
; index
++) {
2363 pasync_header_h
->cri
= -1;
2364 pasync_header_h
->index
= (char)index
;
2365 INIT_LIST_HEAD(&pasync_header_h
->link
);
2366 pasync_header_h
->pbuffer
=
2367 (void *)((unsigned long)
2368 (pasync_ctx
->async_header
.va_base
) +
2369 (p
->defpdu_hdr_sz
* index
));
2371 pasync_header_h
->pa
.u
.a64
.address
=
2372 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
+
2373 (p
->defpdu_hdr_sz
* index
);
2375 list_add_tail(&pasync_header_h
->link
,
2376 &pasync_ctx
->async_header
.free_list
);
2378 pasync_ctx
->async_header
.free_entries
++;
2379 pasync_ctx
->async_header
.writables
++;
2381 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].wait_queue
.list
);
2382 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].
2384 pasync_data_h
->cri
= -1;
2385 pasync_data_h
->index
= (char)index
;
2386 INIT_LIST_HEAD(&pasync_data_h
->link
);
2387 pasync_data_h
->pbuffer
=
2388 (void *)((unsigned long)
2389 (pasync_ctx
->async_data
.va_base
) +
2390 (p
->defpdu_data_sz
* index
));
2392 pasync_data_h
->pa
.u
.a64
.address
=
2393 pasync_ctx
->async_data
.pa_base
.u
.a64
.address
+
2394 (p
->defpdu_data_sz
* index
);
2396 list_add_tail(&pasync_data_h
->link
,
2397 &pasync_ctx
->async_data
.free_list
);
2399 pasync_ctx
->async_data
.free_entries
++;
2400 pasync_ctx
->async_data
.writables
++;
2402 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].data_busy_list
);
2405 pasync_ctx
->async_header
.host_write_ptr
= 0;
2406 pasync_ctx
->async_header
.ep_read_ptr
= -1;
2407 pasync_ctx
->async_data
.host_write_ptr
= 0;
2408 pasync_ctx
->async_data
.ep_read_ptr
= -1;
2412 be_sgl_create_contiguous(void *virtual_address
,
2413 u64 physical_address
, u32 length
,
2414 struct be_dma_mem
*sgl
)
2416 WARN_ON(!virtual_address
);
2417 WARN_ON(!physical_address
);
2418 WARN_ON(!length
> 0);
2421 sgl
->va
= virtual_address
;
2422 sgl
->dma
= (unsigned long)physical_address
;
2428 static void be_sgl_destroy_contiguous(struct be_dma_mem
*sgl
)
2430 memset(sgl
, 0, sizeof(*sgl
));
2434 hwi_build_be_sgl_arr(struct beiscsi_hba
*phba
,
2435 struct mem_array
*pmem
, struct be_dma_mem
*sgl
)
2438 be_sgl_destroy_contiguous(sgl
);
2440 be_sgl_create_contiguous(pmem
->virtual_address
,
2441 pmem
->bus_address
.u
.a64
.address
,
2446 hwi_build_be_sgl_by_offset(struct beiscsi_hba
*phba
,
2447 struct mem_array
*pmem
, struct be_dma_mem
*sgl
)
2450 be_sgl_destroy_contiguous(sgl
);
2452 be_sgl_create_contiguous((unsigned char *)pmem
->virtual_address
,
2453 pmem
->bus_address
.u
.a64
.address
,
2457 static int be_fill_queue(struct be_queue_info
*q
,
2458 u16 len
, u16 entry_size
, void *vaddress
)
2460 struct be_dma_mem
*mem
= &q
->dma_mem
;
2462 memset(q
, 0, sizeof(*q
));
2464 q
->entry_size
= entry_size
;
2465 mem
->size
= len
* entry_size
;
2469 memset(mem
->va
, 0, mem
->size
);
2473 static int beiscsi_create_eqs(struct beiscsi_hba
*phba
,
2474 struct hwi_context_memory
*phwi_context
)
2476 unsigned int i
, num_eq_pages
;
2477 int ret
, eq_for_mcc
;
2478 struct be_queue_info
*eq
;
2479 struct be_dma_mem
*mem
;
2483 num_eq_pages
= PAGES_REQUIRED(phba
->params
.num_eq_entries
* \
2484 sizeof(struct be_eq_entry
));
2486 if (phba
->msix_enabled
)
2490 for (i
= 0; i
< (phba
->num_cpus
+ eq_for_mcc
); i
++) {
2491 eq
= &phwi_context
->be_eq
[i
].q
;
2493 phwi_context
->be_eq
[i
].phba
= phba
;
2494 eq_vaddress
= pci_alloc_consistent(phba
->pcidev
,
2495 num_eq_pages
* PAGE_SIZE
,
2498 goto create_eq_error
;
2500 mem
->va
= eq_vaddress
;
2501 ret
= be_fill_queue(eq
, phba
->params
.num_eq_entries
,
2502 sizeof(struct be_eq_entry
), eq_vaddress
);
2504 shost_printk(KERN_ERR
, phba
->shost
,
2505 "be_fill_queue Failed for EQ\n");
2506 goto create_eq_error
;
2510 ret
= beiscsi_cmd_eq_create(&phba
->ctrl
, eq
,
2511 phwi_context
->cur_eqd
);
2513 shost_printk(KERN_ERR
, phba
->shost
,
2514 "beiscsi_cmd_eq_create"
2516 goto create_eq_error
;
2518 SE_DEBUG(DBG_LVL_8
, "eqid = %d\n", phwi_context
->be_eq
[i
].q
.id
);
2522 for (i
= 0; i
< (phba
->num_cpus
+ 1); i
++) {
2523 eq
= &phwi_context
->be_eq
[i
].q
;
2526 pci_free_consistent(phba
->pcidev
, num_eq_pages
2533 static int beiscsi_create_cqs(struct beiscsi_hba
*phba
,
2534 struct hwi_context_memory
*phwi_context
)
2536 unsigned int i
, num_cq_pages
;
2538 struct be_queue_info
*cq
, *eq
;
2539 struct be_dma_mem
*mem
;
2540 struct be_eq_obj
*pbe_eq
;
2544 num_cq_pages
= PAGES_REQUIRED(phba
->params
.num_cq_entries
* \
2545 sizeof(struct sol_cqe
));
2547 for (i
= 0; i
< phba
->num_cpus
; i
++) {
2548 cq
= &phwi_context
->be_cq
[i
];
2549 eq
= &phwi_context
->be_eq
[i
].q
;
2550 pbe_eq
= &phwi_context
->be_eq
[i
];
2552 pbe_eq
->phba
= phba
;
2554 cq_vaddress
= pci_alloc_consistent(phba
->pcidev
,
2555 num_cq_pages
* PAGE_SIZE
,
2558 goto create_cq_error
;
2559 ret
= be_fill_queue(cq
, phba
->params
.num_cq_entries
,
2560 sizeof(struct sol_cqe
), cq_vaddress
);
2562 shost_printk(KERN_ERR
, phba
->shost
,
2563 "be_fill_queue Failed for ISCSI CQ\n");
2564 goto create_cq_error
;
2568 ret
= beiscsi_cmd_cq_create(&phba
->ctrl
, cq
, eq
, false,
2571 shost_printk(KERN_ERR
, phba
->shost
,
2572 "beiscsi_cmd_eq_create"
2573 "Failed for ISCSI CQ\n");
2574 goto create_cq_error
;
2576 SE_DEBUG(DBG_LVL_8
, "iscsi cq_id is %d for eq_id %d\n",
2578 SE_DEBUG(DBG_LVL_8
, "ISCSI CQ CREATED\n");
2583 for (i
= 0; i
< phba
->num_cpus
; i
++) {
2584 cq
= &phwi_context
->be_cq
[i
];
2587 pci_free_consistent(phba
->pcidev
, num_cq_pages
2596 beiscsi_create_def_hdr(struct beiscsi_hba
*phba
,
2597 struct hwi_context_memory
*phwi_context
,
2598 struct hwi_controller
*phwi_ctrlr
,
2599 unsigned int def_pdu_ring_sz
)
2603 struct be_queue_info
*dq
, *cq
;
2604 struct be_dma_mem
*mem
;
2605 struct be_mem_descriptor
*mem_descr
;
2609 dq
= &phwi_context
->be_def_hdrq
;
2610 cq
= &phwi_context
->be_cq
[0];
2612 mem_descr
= phba
->init_mem
;
2613 mem_descr
+= HWI_MEM_ASYNC_HEADER_RING
;
2614 dq_vaddress
= mem_descr
->mem_array
[idx
].virtual_address
;
2615 ret
= be_fill_queue(dq
, mem_descr
->mem_array
[0].size
/
2616 sizeof(struct phys_addr
),
2617 sizeof(struct phys_addr
), dq_vaddress
);
2619 shost_printk(KERN_ERR
, phba
->shost
,
2620 "be_fill_queue Failed for DEF PDU HDR\n");
2623 mem
->dma
= (unsigned long)mem_descr
->mem_array
[idx
].
2624 bus_address
.u
.a64
.address
;
2625 ret
= be_cmd_create_default_pdu_queue(&phba
->ctrl
, cq
, dq
,
2627 phba
->params
.defpdu_hdr_sz
);
2629 shost_printk(KERN_ERR
, phba
->shost
,
2630 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2633 phwi_ctrlr
->default_pdu_hdr
.id
= phwi_context
->be_def_hdrq
.id
;
2634 SE_DEBUG(DBG_LVL_8
, "iscsi def pdu id is %d\n",
2635 phwi_context
->be_def_hdrq
.id
);
2636 hwi_post_async_buffers(phba
, 1);
2641 beiscsi_create_def_data(struct beiscsi_hba
*phba
,
2642 struct hwi_context_memory
*phwi_context
,
2643 struct hwi_controller
*phwi_ctrlr
,
2644 unsigned int def_pdu_ring_sz
)
2648 struct be_queue_info
*dataq
, *cq
;
2649 struct be_dma_mem
*mem
;
2650 struct be_mem_descriptor
*mem_descr
;
2654 dataq
= &phwi_context
->be_def_dataq
;
2655 cq
= &phwi_context
->be_cq
[0];
2656 mem
= &dataq
->dma_mem
;
2657 mem_descr
= phba
->init_mem
;
2658 mem_descr
+= HWI_MEM_ASYNC_DATA_RING
;
2659 dq_vaddress
= mem_descr
->mem_array
[idx
].virtual_address
;
2660 ret
= be_fill_queue(dataq
, mem_descr
->mem_array
[0].size
/
2661 sizeof(struct phys_addr
),
2662 sizeof(struct phys_addr
), dq_vaddress
);
2664 shost_printk(KERN_ERR
, phba
->shost
,
2665 "be_fill_queue Failed for DEF PDU DATA\n");
2668 mem
->dma
= (unsigned long)mem_descr
->mem_array
[idx
].
2669 bus_address
.u
.a64
.address
;
2670 ret
= be_cmd_create_default_pdu_queue(&phba
->ctrl
, cq
, dataq
,
2672 phba
->params
.defpdu_data_sz
);
2674 shost_printk(KERN_ERR
, phba
->shost
,
2675 "be_cmd_create_default_pdu_queue Failed"
2676 " for DEF PDU DATA\n");
2679 phwi_ctrlr
->default_pdu_data
.id
= phwi_context
->be_def_dataq
.id
;
2680 SE_DEBUG(DBG_LVL_8
, "iscsi def data id is %d\n",
2681 phwi_context
->be_def_dataq
.id
);
2682 hwi_post_async_buffers(phba
, 0);
2683 SE_DEBUG(DBG_LVL_8
, "DEFAULT PDU DATA RING CREATED\n");
2688 beiscsi_post_pages(struct beiscsi_hba
*phba
)
2690 struct be_mem_descriptor
*mem_descr
;
2691 struct mem_array
*pm_arr
;
2692 unsigned int page_offset
, i
;
2693 struct be_dma_mem sgl
;
2696 mem_descr
= phba
->init_mem
;
2697 mem_descr
+= HWI_MEM_SGE
;
2698 pm_arr
= mem_descr
->mem_array
;
2700 page_offset
= (sizeof(struct iscsi_sge
) * phba
->params
.num_sge_per_io
*
2701 phba
->fw_config
.iscsi_icd_start
) / PAGE_SIZE
;
2702 for (i
= 0; i
< mem_descr
->num_elements
; i
++) {
2703 hwi_build_be_sgl_arr(phba
, pm_arr
, &sgl
);
2704 status
= be_cmd_iscsi_post_sgl_pages(&phba
->ctrl
, &sgl
,
2706 (pm_arr
->size
/ PAGE_SIZE
));
2707 page_offset
+= pm_arr
->size
/ PAGE_SIZE
;
2709 shost_printk(KERN_ERR
, phba
->shost
,
2710 "post sgl failed.\n");
2715 SE_DEBUG(DBG_LVL_8
, "POSTED PAGES\n");
2719 static void be_queue_free(struct beiscsi_hba
*phba
, struct be_queue_info
*q
)
2721 struct be_dma_mem
*mem
= &q
->dma_mem
;
2723 pci_free_consistent(phba
->pcidev
, mem
->size
,
2727 static int be_queue_alloc(struct beiscsi_hba
*phba
, struct be_queue_info
*q
,
2728 u16 len
, u16 entry_size
)
2730 struct be_dma_mem
*mem
= &q
->dma_mem
;
2732 memset(q
, 0, sizeof(*q
));
2734 q
->entry_size
= entry_size
;
2735 mem
->size
= len
* entry_size
;
2736 mem
->va
= pci_alloc_consistent(phba
->pcidev
, mem
->size
, &mem
->dma
);
2739 memset(mem
->va
, 0, mem
->size
);
2744 beiscsi_create_wrb_rings(struct beiscsi_hba
*phba
,
2745 struct hwi_context_memory
*phwi_context
,
2746 struct hwi_controller
*phwi_ctrlr
)
2748 unsigned int wrb_mem_index
, offset
, size
, num_wrb_rings
;
2750 unsigned int idx
, num
, i
;
2751 struct mem_array
*pwrb_arr
;
2753 struct be_dma_mem sgl
;
2754 struct be_mem_descriptor
*mem_descr
;
2758 mem_descr
= phba
->init_mem
;
2759 mem_descr
+= HWI_MEM_WRB
;
2760 pwrb_arr
= kmalloc(sizeof(*pwrb_arr
) * phba
->params
.cxns_per_ctrl
,
2763 shost_printk(KERN_ERR
, phba
->shost
,
2764 "Memory alloc failed in create wrb ring.\n");
2767 wrb_vaddr
= mem_descr
->mem_array
[idx
].virtual_address
;
2768 pa_addr_lo
= mem_descr
->mem_array
[idx
].bus_address
.u
.a64
.address
;
2769 num_wrb_rings
= mem_descr
->mem_array
[idx
].size
/
2770 (phba
->params
.wrbs_per_cxn
* sizeof(struct iscsi_wrb
));
2772 for (num
= 0; num
< phba
->params
.cxns_per_ctrl
; num
++) {
2773 if (num_wrb_rings
) {
2774 pwrb_arr
[num
].virtual_address
= wrb_vaddr
;
2775 pwrb_arr
[num
].bus_address
.u
.a64
.address
= pa_addr_lo
;
2776 pwrb_arr
[num
].size
= phba
->params
.wrbs_per_cxn
*
2777 sizeof(struct iscsi_wrb
);
2778 wrb_vaddr
+= pwrb_arr
[num
].size
;
2779 pa_addr_lo
+= pwrb_arr
[num
].size
;
2783 wrb_vaddr
= mem_descr
->mem_array
[idx
].virtual_address
;
2784 pa_addr_lo
= mem_descr
->mem_array
[idx
].\
2785 bus_address
.u
.a64
.address
;
2786 num_wrb_rings
= mem_descr
->mem_array
[idx
].size
/
2787 (phba
->params
.wrbs_per_cxn
*
2788 sizeof(struct iscsi_wrb
));
2789 pwrb_arr
[num
].virtual_address
= wrb_vaddr
;
2790 pwrb_arr
[num
].bus_address
.u
.a64
.address\
2792 pwrb_arr
[num
].size
= phba
->params
.wrbs_per_cxn
*
2793 sizeof(struct iscsi_wrb
);
2794 wrb_vaddr
+= pwrb_arr
[num
].size
;
2795 pa_addr_lo
+= pwrb_arr
[num
].size
;
2799 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
2804 hwi_build_be_sgl_by_offset(phba
, &pwrb_arr
[i
], &sgl
);
2805 status
= be_cmd_wrbq_create(&phba
->ctrl
, &sgl
,
2806 &phwi_context
->be_wrbq
[i
]);
2808 shost_printk(KERN_ERR
, phba
->shost
,
2809 "wrbq create failed.");
2813 phwi_ctrlr
->wrb_context
[i
* 2].cid
= phwi_context
->be_wrbq
[i
].
2820 static void free_wrb_handles(struct beiscsi_hba
*phba
)
2823 struct hwi_controller
*phwi_ctrlr
;
2824 struct hwi_wrb_context
*pwrb_context
;
2826 phwi_ctrlr
= phba
->phwi_ctrlr
;
2827 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
2828 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
2829 kfree(pwrb_context
->pwrb_handle_base
);
2830 kfree(pwrb_context
->pwrb_handle_basestd
);
2834 static void be_mcc_queues_destroy(struct beiscsi_hba
*phba
)
2836 struct be_queue_info
*q
;
2837 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
2839 q
= &phba
->ctrl
.mcc_obj
.q
;
2841 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_MCCQ
);
2842 be_queue_free(phba
, q
);
2844 q
= &phba
->ctrl
.mcc_obj
.cq
;
2846 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_CQ
);
2847 be_queue_free(phba
, q
);
2850 static void hwi_cleanup(struct beiscsi_hba
*phba
)
2852 struct be_queue_info
*q
;
2853 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
2854 struct hwi_controller
*phwi_ctrlr
;
2855 struct hwi_context_memory
*phwi_context
;
2858 phwi_ctrlr
= phba
->phwi_ctrlr
;
2859 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
2860 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
2861 q
= &phwi_context
->be_wrbq
[i
];
2863 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_WRBQ
);
2865 free_wrb_handles(phba
);
2867 q
= &phwi_context
->be_def_hdrq
;
2869 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_DPDUQ
);
2871 q
= &phwi_context
->be_def_dataq
;
2873 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_DPDUQ
);
2875 beiscsi_cmd_q_destroy(ctrl
, NULL
, QTYPE_SGL
);
2877 for (i
= 0; i
< (phba
->num_cpus
); i
++) {
2878 q
= &phwi_context
->be_cq
[i
];
2880 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_CQ
);
2882 if (phba
->msix_enabled
)
2886 for (i
= 0; i
< (phba
->num_cpus
+ eq_num
); i
++) {
2887 q
= &phwi_context
->be_eq
[i
].q
;
2889 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_EQ
);
2891 be_mcc_queues_destroy(phba
);
2894 static int be_mcc_queues_create(struct beiscsi_hba
*phba
,
2895 struct hwi_context_memory
*phwi_context
)
2897 struct be_queue_info
*q
, *cq
;
2898 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
2900 /* Alloc MCC compl queue */
2901 cq
= &phba
->ctrl
.mcc_obj
.cq
;
2902 if (be_queue_alloc(phba
, cq
, MCC_CQ_LEN
,
2903 sizeof(struct be_mcc_compl
)))
2905 /* Ask BE to create MCC compl queue; */
2906 if (phba
->msix_enabled
) {
2907 if (beiscsi_cmd_cq_create(ctrl
, cq
, &phwi_context
->be_eq
2908 [phba
->num_cpus
].q
, false, true, 0))
2911 if (beiscsi_cmd_cq_create(ctrl
, cq
, &phwi_context
->be_eq
[0].q
,
2916 /* Alloc MCC queue */
2917 q
= &phba
->ctrl
.mcc_obj
.q
;
2918 if (be_queue_alloc(phba
, q
, MCC_Q_LEN
, sizeof(struct be_mcc_wrb
)))
2919 goto mcc_cq_destroy
;
2921 /* Ask BE to create MCC queue */
2922 if (beiscsi_cmd_mccq_create(phba
, q
, cq
))
2928 be_queue_free(phba
, q
);
2930 beiscsi_cmd_q_destroy(ctrl
, cq
, QTYPE_CQ
);
2932 be_queue_free(phba
, cq
);
2937 static int find_num_cpus(void)
2941 num_cpus
= num_online_cpus();
2942 if (num_cpus
>= MAX_CPUS
)
2943 num_cpus
= MAX_CPUS
- 1;
2945 SE_DEBUG(DBG_LVL_8
, "num_cpus = %d\n", num_cpus
);
2949 static int hwi_init_port(struct beiscsi_hba
*phba
)
2951 struct hwi_controller
*phwi_ctrlr
;
2952 struct hwi_context_memory
*phwi_context
;
2953 unsigned int def_pdu_ring_sz
;
2954 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
2958 phba
->params
.asyncpdus_per_ctrl
* sizeof(struct phys_addr
);
2959 phwi_ctrlr
= phba
->phwi_ctrlr
;
2960 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
2961 phwi_context
->max_eqd
= 0;
2962 phwi_context
->min_eqd
= 0;
2963 phwi_context
->cur_eqd
= 64;
2964 be_cmd_fw_initialize(&phba
->ctrl
);
2966 status
= beiscsi_create_eqs(phba
, phwi_context
);
2968 shost_printk(KERN_ERR
, phba
->shost
, "EQ not created\n");
2972 status
= be_mcc_queues_create(phba
, phwi_context
);
2976 status
= mgmt_check_supported_fw(ctrl
, phba
);
2978 shost_printk(KERN_ERR
, phba
->shost
,
2979 "Unsupported fw version\n");
2983 status
= beiscsi_create_cqs(phba
, phwi_context
);
2985 shost_printk(KERN_ERR
, phba
->shost
, "CQ not created\n");
2989 status
= beiscsi_create_def_hdr(phba
, phwi_context
, phwi_ctrlr
,
2992 shost_printk(KERN_ERR
, phba
->shost
,
2993 "Default Header not created\n");
2997 status
= beiscsi_create_def_data(phba
, phwi_context
,
2998 phwi_ctrlr
, def_pdu_ring_sz
);
3000 shost_printk(KERN_ERR
, phba
->shost
,
3001 "Default Data not created\n");
3005 status
= beiscsi_post_pages(phba
);
3007 shost_printk(KERN_ERR
, phba
->shost
, "Post SGL Pages Failed\n");
3011 status
= beiscsi_create_wrb_rings(phba
, phwi_context
, phwi_ctrlr
);
3013 shost_printk(KERN_ERR
, phba
->shost
,
3014 "WRB Rings not created\n");
3018 SE_DEBUG(DBG_LVL_8
, "hwi_init_port success\n");
3022 shost_printk(KERN_ERR
, phba
->shost
, "hwi_init_port failed");
3027 static int hwi_init_controller(struct beiscsi_hba
*phba
)
3029 struct hwi_controller
*phwi_ctrlr
;
3031 phwi_ctrlr
= phba
->phwi_ctrlr
;
3032 if (1 == phba
->init_mem
[HWI_MEM_ADDN_CONTEXT
].num_elements
) {
3033 phwi_ctrlr
->phwi_ctxt
= (struct hwi_context_memory
*)phba
->
3034 init_mem
[HWI_MEM_ADDN_CONTEXT
].mem_array
[0].virtual_address
;
3035 SE_DEBUG(DBG_LVL_8
, " phwi_ctrlr->phwi_ctxt=%p\n",
3036 phwi_ctrlr
->phwi_ctxt
);
3038 shost_printk(KERN_ERR
, phba
->shost
,
3039 "HWI_MEM_ADDN_CONTEXT is more than one element."
3040 "Failing to load\n");
3044 iscsi_init_global_templates(phba
);
3045 beiscsi_init_wrb_handle(phba
);
3046 hwi_init_async_pdu_ctx(phba
);
3047 if (hwi_init_port(phba
) != 0) {
3048 shost_printk(KERN_ERR
, phba
->shost
,
3049 "hwi_init_controller failed\n");
3055 static void beiscsi_free_mem(struct beiscsi_hba
*phba
)
3057 struct be_mem_descriptor
*mem_descr
;
3060 mem_descr
= phba
->init_mem
;
3063 for (i
= 0; i
< SE_MEM_MAX
; i
++) {
3064 for (j
= mem_descr
->num_elements
; j
> 0; j
--) {
3065 pci_free_consistent(phba
->pcidev
,
3066 mem_descr
->mem_array
[j
- 1].size
,
3067 mem_descr
->mem_array
[j
- 1].virtual_address
,
3068 (unsigned long)mem_descr
->mem_array
[j
- 1].
3069 bus_address
.u
.a64
.address
);
3071 kfree(mem_descr
->mem_array
);
3074 kfree(phba
->init_mem
);
3075 kfree(phba
->phwi_ctrlr
);
3078 static int beiscsi_init_controller(struct beiscsi_hba
*phba
)
3082 ret
= beiscsi_get_memory(phba
);
3084 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe -"
3085 "Failed in beiscsi_alloc_memory\n");
3089 ret
= hwi_init_controller(phba
);
3092 SE_DEBUG(DBG_LVL_8
, "Return success from beiscsi_init_controller");
3096 beiscsi_free_mem(phba
);
3100 static int beiscsi_init_sgl_handle(struct beiscsi_hba
*phba
)
3102 struct be_mem_descriptor
*mem_descr_sglh
, *mem_descr_sg
;
3103 struct sgl_handle
*psgl_handle
;
3104 struct iscsi_sge
*pfrag
;
3105 unsigned int arr_index
, i
, idx
;
3107 phba
->io_sgl_hndl_avbl
= 0;
3108 phba
->eh_sgl_hndl_avbl
= 0;
3110 mem_descr_sglh
= phba
->init_mem
;
3111 mem_descr_sglh
+= HWI_MEM_SGLH
;
3112 if (1 == mem_descr_sglh
->num_elements
) {
3113 phba
->io_sgl_hndl_base
= kzalloc(sizeof(struct sgl_handle
*) *
3114 phba
->params
.ios_per_ctrl
,
3116 if (!phba
->io_sgl_hndl_base
) {
3117 shost_printk(KERN_ERR
, phba
->shost
,
3118 "Mem Alloc Failed. Failing to load\n");
3121 phba
->eh_sgl_hndl_base
= kzalloc(sizeof(struct sgl_handle
*) *
3122 (phba
->params
.icds_per_ctrl
-
3123 phba
->params
.ios_per_ctrl
),
3125 if (!phba
->eh_sgl_hndl_base
) {
3126 kfree(phba
->io_sgl_hndl_base
);
3127 shost_printk(KERN_ERR
, phba
->shost
,
3128 "Mem Alloc Failed. Failing to load\n");
3132 shost_printk(KERN_ERR
, phba
->shost
,
3133 "HWI_MEM_SGLH is more than one element."
3134 "Failing to load\n");
3140 while (idx
< mem_descr_sglh
->num_elements
) {
3141 psgl_handle
= mem_descr_sglh
->mem_array
[idx
].virtual_address
;
3143 for (i
= 0; i
< (mem_descr_sglh
->mem_array
[idx
].size
/
3144 sizeof(struct sgl_handle
)); i
++) {
3145 if (arr_index
< phba
->params
.ios_per_ctrl
) {
3146 phba
->io_sgl_hndl_base
[arr_index
] = psgl_handle
;
3147 phba
->io_sgl_hndl_avbl
++;
3150 phba
->eh_sgl_hndl_base
[arr_index
-
3151 phba
->params
.ios_per_ctrl
] =
3154 phba
->eh_sgl_hndl_avbl
++;
3161 "phba->io_sgl_hndl_avbl=%d"
3162 "phba->eh_sgl_hndl_avbl=%d\n",
3163 phba
->io_sgl_hndl_avbl
,
3164 phba
->eh_sgl_hndl_avbl
);
3165 mem_descr_sg
= phba
->init_mem
;
3166 mem_descr_sg
+= HWI_MEM_SGE
;
3167 SE_DEBUG(DBG_LVL_8
, "\n mem_descr_sg->num_elements=%d\n",
3168 mem_descr_sg
->num_elements
);
3171 while (idx
< mem_descr_sg
->num_elements
) {
3172 pfrag
= mem_descr_sg
->mem_array
[idx
].virtual_address
;
3175 i
< (mem_descr_sg
->mem_array
[idx
].size
) /
3176 (sizeof(struct iscsi_sge
) * phba
->params
.num_sge_per_io
);
3178 if (arr_index
< phba
->params
.ios_per_ctrl
)
3179 psgl_handle
= phba
->io_sgl_hndl_base
[arr_index
];
3181 psgl_handle
= phba
->eh_sgl_hndl_base
[arr_index
-
3182 phba
->params
.ios_per_ctrl
];
3183 psgl_handle
->pfrag
= pfrag
;
3184 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, pfrag
, 0);
3185 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, pfrag
, 0);
3186 pfrag
+= phba
->params
.num_sge_per_io
;
3187 psgl_handle
->sgl_index
=
3188 phba
->fw_config
.iscsi_icd_start
+ arr_index
++;
3192 phba
->io_sgl_free_index
= 0;
3193 phba
->io_sgl_alloc_index
= 0;
3194 phba
->eh_sgl_free_index
= 0;
3195 phba
->eh_sgl_alloc_index
= 0;
3199 static int hba_setup_cid_tbls(struct beiscsi_hba
*phba
)
3203 phba
->cid_array
= kzalloc(sizeof(void *) * phba
->params
.cxns_per_ctrl
,
3205 if (!phba
->cid_array
) {
3206 shost_printk(KERN_ERR
, phba
->shost
,
3207 "Failed to allocate memory in "
3208 "hba_setup_cid_tbls\n");
3211 phba
->ep_array
= kzalloc(sizeof(struct iscsi_endpoint
*) *
3212 phba
->params
.cxns_per_ctrl
* 2, GFP_KERNEL
);
3213 if (!phba
->ep_array
) {
3214 shost_printk(KERN_ERR
, phba
->shost
,
3215 "Failed to allocate memory in "
3216 "hba_setup_cid_tbls\n");
3217 kfree(phba
->cid_array
);
3220 new_cid
= phba
->fw_config
.iscsi_cid_start
;
3221 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
3222 phba
->cid_array
[i
] = new_cid
;
3225 phba
->avlbl_cids
= phba
->params
.cxns_per_ctrl
;
3229 static void hwi_enable_intr(struct beiscsi_hba
*phba
)
3231 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3232 struct hwi_controller
*phwi_ctrlr
;
3233 struct hwi_context_memory
*phwi_context
;
3234 struct be_queue_info
*eq
;
3239 phwi_ctrlr
= phba
->phwi_ctrlr
;
3240 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3242 addr
= (u8 __iomem
*) ((u8 __iomem
*) ctrl
->pcicfg
+
3243 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
);
3244 reg
= ioread32(addr
);
3245 SE_DEBUG(DBG_LVL_8
, "reg =x%08x\n", reg
);
3247 enabled
= reg
& MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3249 reg
|= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3250 SE_DEBUG(DBG_LVL_8
, "reg =x%08x addr=%p\n", reg
, addr
);
3251 iowrite32(reg
, addr
);
3252 if (!phba
->msix_enabled
) {
3253 eq
= &phwi_context
->be_eq
[0].q
;
3254 SE_DEBUG(DBG_LVL_8
, "eq->id=%d\n", eq
->id
);
3255 hwi_ring_eq_db(phba
, eq
->id
, 0, 0, 1, 1);
3257 for (i
= 0; i
<= phba
->num_cpus
; i
++) {
3258 eq
= &phwi_context
->be_eq
[i
].q
;
3259 SE_DEBUG(DBG_LVL_8
, "eq->id=%d\n", eq
->id
);
3260 hwi_ring_eq_db(phba
, eq
->id
, 0, 0, 1, 1);
3266 static void hwi_disable_intr(struct beiscsi_hba
*phba
)
3268 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3270 u8 __iomem
*addr
= ctrl
->pcicfg
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
;
3271 u32 reg
= ioread32(addr
);
3273 u32 enabled
= reg
& MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3275 reg
&= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3276 iowrite32(reg
, addr
);
3278 shost_printk(KERN_WARNING
, phba
->shost
,
3279 "In hwi_disable_intr, Already Disabled\n");
3282 static int beiscsi_init_port(struct beiscsi_hba
*phba
)
3286 ret
= beiscsi_init_controller(phba
);
3288 shost_printk(KERN_ERR
, phba
->shost
,
3289 "beiscsi_dev_probe - Failed in"
3290 "beiscsi_init_controller\n");
3293 ret
= beiscsi_init_sgl_handle(phba
);
3295 shost_printk(KERN_ERR
, phba
->shost
,
3296 "beiscsi_dev_probe - Failed in"
3297 "beiscsi_init_sgl_handle\n");
3298 goto do_cleanup_ctrlr
;
3301 if (hba_setup_cid_tbls(phba
)) {
3302 shost_printk(KERN_ERR
, phba
->shost
,
3303 "Failed in hba_setup_cid_tbls\n");
3304 kfree(phba
->io_sgl_hndl_base
);
3305 kfree(phba
->eh_sgl_hndl_base
);
3306 goto do_cleanup_ctrlr
;
3316 static void hwi_purge_eq(struct beiscsi_hba
*phba
)
3318 struct hwi_controller
*phwi_ctrlr
;
3319 struct hwi_context_memory
*phwi_context
;
3320 struct be_queue_info
*eq
;
3321 struct be_eq_entry
*eqe
= NULL
;
3323 unsigned int num_processed
;
3325 phwi_ctrlr
= phba
->phwi_ctrlr
;
3326 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3327 if (phba
->msix_enabled
)
3332 for (i
= 0; i
< (phba
->num_cpus
+ eq_msix
); i
++) {
3333 eq
= &phwi_context
->be_eq
[i
].q
;
3334 eqe
= queue_tail_node(eq
);
3336 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
3338 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
3340 eqe
= queue_tail_node(eq
);
3345 hwi_ring_eq_db(phba
, eq
->id
, 1, num_processed
, 1, 1);
3349 static void beiscsi_clean_port(struct beiscsi_hba
*phba
)
3353 mgmt_status
= mgmt_epfw_cleanup(phba
, CMD_CONNECTION_CHUTE_0
);
3355 shost_printk(KERN_WARNING
, phba
->shost
,
3356 "mgmt_epfw_cleanup FAILED\n");
3360 kfree(phba
->io_sgl_hndl_base
);
3361 kfree(phba
->eh_sgl_hndl_base
);
3362 kfree(phba
->cid_array
);
3363 kfree(phba
->ep_array
);
3367 beiscsi_offload_connection(struct beiscsi_conn
*beiscsi_conn
,
3368 struct beiscsi_offload_params
*params
)
3370 struct wrb_handle
*pwrb_handle
;
3371 struct iscsi_target_context_update_wrb
*pwrb
= NULL
;
3372 struct be_mem_descriptor
*mem_descr
;
3373 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3377 * We can always use 0 here because it is reserved by libiscsi for
3378 * login/startup related tasks.
3380 pwrb_handle
= alloc_wrb_handle(phba
, (beiscsi_conn
->beiscsi_conn_cid
-
3381 phba
->fw_config
.iscsi_cid_start
));
3382 pwrb
= (struct iscsi_target_context_update_wrb
*)pwrb_handle
->pwrb
;
3383 memset(pwrb
, 0, sizeof(*pwrb
));
3384 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3385 max_burst_length
, pwrb
, params
->dw
[offsetof
3386 (struct amap_beiscsi_offload_params
,
3387 max_burst_length
) / 32]);
3388 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3389 max_send_data_segment_length
, pwrb
,
3390 params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3391 max_send_data_segment_length
) / 32]);
3392 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3395 params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3396 first_burst_length
) / 32]);
3398 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, erl
, pwrb
,
3399 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3400 erl
) / 32] & OFFLD_PARAMS_ERL
));
3401 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, dde
, pwrb
,
3402 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3403 dde
) / 32] & OFFLD_PARAMS_DDE
) >> 2);
3404 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, hde
, pwrb
,
3405 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3406 hde
) / 32] & OFFLD_PARAMS_HDE
) >> 3);
3407 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, ir2t
, pwrb
,
3408 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3409 ir2t
) / 32] & OFFLD_PARAMS_IR2T
) >> 4);
3410 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, imd
, pwrb
,
3411 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3412 imd
) / 32] & OFFLD_PARAMS_IMD
) >> 5);
3413 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, stat_sn
,
3415 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3416 exp_statsn
) / 32] + 1));
3417 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, type
, pwrb
,
3419 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, wrb_idx
,
3420 pwrb
, pwrb_handle
->wrb_index
);
3421 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, ptr2nextwrb
,
3422 pwrb
, pwrb_handle
->nxt_wrb_index
);
3423 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3424 session_state
, pwrb
, 0);
3425 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, compltonack
,
3427 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, notpredblq
,
3429 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, mode
, pwrb
,
3432 mem_descr
= phba
->init_mem
;
3433 mem_descr
+= ISCSI_MEM_GLOBAL_HEADER
;
3435 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3436 pad_buffer_addr_hi
, pwrb
,
3437 mem_descr
->mem_array
[0].bus_address
.u
.a32
.address_hi
);
3438 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3439 pad_buffer_addr_lo
, pwrb
,
3440 mem_descr
->mem_array
[0].bus_address
.u
.a32
.address_lo
);
3442 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_target_context_update_wrb
));
3444 doorbell
|= beiscsi_conn
->beiscsi_conn_cid
& DB_WRB_POST_CID_MASK
;
3445 doorbell
|= (pwrb_handle
->wrb_index
& DB_DEF_PDU_WRB_INDEX_MASK
)
3446 << DB_DEF_PDU_WRB_INDEX_SHIFT
;
3447 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
3449 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
3452 static void beiscsi_parse_pdu(struct iscsi_conn
*conn
, itt_t itt
,
3453 int *index
, int *age
)
3457 *age
= conn
->session
->age
;
3461 * beiscsi_alloc_pdu - allocates pdu and related resources
3462 * @task: libiscsi task
3463 * @opcode: opcode of pdu for task
3465 * This is called with the session lock held. It will allocate
3466 * the wrb and sgl if needed for the command. And it will prep
3467 * the pdu's itt. beiscsi_parse_pdu will later translate
3468 * the pdu itt to the libiscsi task itt.
3470 static int beiscsi_alloc_pdu(struct iscsi_task
*task
, uint8_t opcode
)
3472 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3473 struct iscsi_conn
*conn
= task
->conn
;
3474 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3475 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3476 struct hwi_wrb_context
*pwrb_context
;
3477 struct hwi_controller
*phwi_ctrlr
;
3479 struct beiscsi_session
*beiscsi_sess
= beiscsi_conn
->beiscsi_sess
;
3482 io_task
->cmd_bhs
= pci_pool_alloc(beiscsi_sess
->bhs_pool
,
3483 GFP_KERNEL
, &paddr
);
3484 if (!io_task
->cmd_bhs
)
3486 io_task
->bhs_pa
.u
.a64
.address
= paddr
;
3487 io_task
->libiscsi_itt
= (itt_t
)task
->itt
;
3488 io_task
->conn
= beiscsi_conn
;
3490 task
->hdr
= (struct iscsi_hdr
*)&io_task
->cmd_bhs
->iscsi_hdr
;
3491 task
->hdr_max
= sizeof(struct be_cmd_bhs
);
3492 io_task
->psgl_handle
= NULL
;
3493 io_task
->psgl_handle
= NULL
;
3496 spin_lock(&phba
->io_sgl_lock
);
3497 io_task
->psgl_handle
= alloc_io_sgl_handle(phba
);
3498 spin_unlock(&phba
->io_sgl_lock
);
3499 if (!io_task
->psgl_handle
)
3501 io_task
->pwrb_handle
= alloc_wrb_handle(phba
,
3502 beiscsi_conn
->beiscsi_conn_cid
-
3503 phba
->fw_config
.iscsi_cid_start
);
3504 if (!io_task
->pwrb_handle
)
3507 io_task
->scsi_cmnd
= NULL
;
3508 if ((opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGIN
) {
3509 if (!beiscsi_conn
->login_in_progress
) {
3510 spin_lock(&phba
->mgmt_sgl_lock
);
3511 io_task
->psgl_handle
= (struct sgl_handle
*)
3512 alloc_mgmt_sgl_handle(phba
);
3513 spin_unlock(&phba
->mgmt_sgl_lock
);
3514 if (!io_task
->psgl_handle
)
3517 beiscsi_conn
->login_in_progress
= 1;
3518 beiscsi_conn
->plogin_sgl_handle
=
3519 io_task
->psgl_handle
;
3520 io_task
->pwrb_handle
=
3521 alloc_wrb_handle(phba
,
3522 beiscsi_conn
->beiscsi_conn_cid
-
3523 phba
->fw_config
.iscsi_cid_start
);
3524 if (!io_task
->pwrb_handle
)
3526 beiscsi_conn
->plogin_wrb_handle
=
3527 io_task
->pwrb_handle
;
3530 io_task
->psgl_handle
=
3531 beiscsi_conn
->plogin_sgl_handle
;
3532 io_task
->pwrb_handle
=
3533 beiscsi_conn
->plogin_wrb_handle
;
3536 spin_lock(&phba
->mgmt_sgl_lock
);
3537 io_task
->psgl_handle
= alloc_mgmt_sgl_handle(phba
);
3538 spin_unlock(&phba
->mgmt_sgl_lock
);
3539 if (!io_task
->psgl_handle
)
3541 io_task
->pwrb_handle
=
3542 alloc_wrb_handle(phba
,
3543 beiscsi_conn
->beiscsi_conn_cid
-
3544 phba
->fw_config
.iscsi_cid_start
);
3545 if (!io_task
->pwrb_handle
)
3546 goto free_mgmt_hndls
;
3550 itt
= (itt_t
) cpu_to_be32(((unsigned int)io_task
->pwrb_handle
->
3551 wrb_index
<< 16) | (unsigned int)
3552 (io_task
->psgl_handle
->sgl_index
));
3553 io_task
->pwrb_handle
->pio_handle
= task
;
3555 io_task
->cmd_bhs
->iscsi_hdr
.itt
= itt
;
3559 spin_lock(&phba
->io_sgl_lock
);
3560 free_io_sgl_handle(phba
, io_task
->psgl_handle
);
3561 spin_unlock(&phba
->io_sgl_lock
);
3564 spin_lock(&phba
->mgmt_sgl_lock
);
3565 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
3566 spin_unlock(&phba
->mgmt_sgl_lock
);
3568 phwi_ctrlr
= phba
->phwi_ctrlr
;
3569 pwrb_context
= &phwi_ctrlr
->wrb_context
[
3570 beiscsi_conn
->beiscsi_conn_cid
-
3571 phba
->fw_config
.iscsi_cid_start
];
3572 if (io_task
->pwrb_handle
)
3573 free_wrb_handle(phba
, pwrb_context
, io_task
->pwrb_handle
);
3574 io_task
->pwrb_handle
= NULL
;
3575 pci_pool_free(beiscsi_sess
->bhs_pool
, io_task
->cmd_bhs
,
3576 io_task
->bhs_pa
.u
.a64
.address
);
3577 SE_DEBUG(DBG_LVL_1
, "Alloc of SGL_ICD Failed\n");
3581 static void beiscsi_cleanup_task(struct iscsi_task
*task
)
3583 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3584 struct iscsi_conn
*conn
= task
->conn
;
3585 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3586 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3587 struct beiscsi_session
*beiscsi_sess
= beiscsi_conn
->beiscsi_sess
;
3588 struct hwi_wrb_context
*pwrb_context
;
3589 struct hwi_controller
*phwi_ctrlr
;
3591 phwi_ctrlr
= phba
->phwi_ctrlr
;
3592 pwrb_context
= &phwi_ctrlr
->wrb_context
[beiscsi_conn
->beiscsi_conn_cid
3593 - phba
->fw_config
.iscsi_cid_start
];
3594 if (io_task
->pwrb_handle
) {
3595 free_wrb_handle(phba
, pwrb_context
, io_task
->pwrb_handle
);
3596 io_task
->pwrb_handle
= NULL
;
3599 if (io_task
->cmd_bhs
) {
3600 pci_pool_free(beiscsi_sess
->bhs_pool
, io_task
->cmd_bhs
,
3601 io_task
->bhs_pa
.u
.a64
.address
);
3605 if (io_task
->psgl_handle
) {
3606 spin_lock(&phba
->io_sgl_lock
);
3607 free_io_sgl_handle(phba
, io_task
->psgl_handle
);
3608 spin_unlock(&phba
->io_sgl_lock
);
3609 io_task
->psgl_handle
= NULL
;
3612 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGIN
)
3614 if (io_task
->psgl_handle
) {
3615 spin_lock(&phba
->mgmt_sgl_lock
);
3616 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
3617 spin_unlock(&phba
->mgmt_sgl_lock
);
3618 io_task
->psgl_handle
= NULL
;
3623 static int beiscsi_iotask(struct iscsi_task
*task
, struct scatterlist
*sg
,
3624 unsigned int num_sg
, unsigned int xferlen
,
3625 unsigned int writedir
)
3628 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3629 struct iscsi_conn
*conn
= task
->conn
;
3630 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3631 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3632 struct iscsi_wrb
*pwrb
= NULL
;
3633 unsigned int doorbell
= 0;
3635 pwrb
= io_task
->pwrb_handle
->pwrb
;
3636 io_task
->cmd_bhs
->iscsi_hdr
.exp_statsn
= 0;
3637 io_task
->bhs_len
= sizeof(struct be_cmd_bhs
);
3640 memset(&io_task
->cmd_bhs
->iscsi_data_pdu
, 0, 48);
3641 AMAP_SET_BITS(struct amap_pdu_data_out
, itt
,
3642 &io_task
->cmd_bhs
->iscsi_data_pdu
,
3643 (unsigned int)io_task
->cmd_bhs
->iscsi_hdr
.itt
);
3644 AMAP_SET_BITS(struct amap_pdu_data_out
, opcode
,
3645 &io_task
->cmd_bhs
->iscsi_data_pdu
,
3646 ISCSI_OPCODE_SCSI_DATA_OUT
);
3647 AMAP_SET_BITS(struct amap_pdu_data_out
, final_bit
,
3648 &io_task
->cmd_bhs
->iscsi_data_pdu
, 1);
3649 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3651 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 1);
3653 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3655 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
3657 memcpy(&io_task
->cmd_bhs
->iscsi_data_pdu
.
3658 dw
[offsetof(struct amap_pdu_data_out
, lun
) / 32],
3659 io_task
->cmd_bhs
->iscsi_hdr
.lun
, sizeof(struct scsi_lun
));
3661 AMAP_SET_BITS(struct amap_iscsi_wrb
, lun
, pwrb
,
3662 cpu_to_be16((unsigned short)io_task
->cmd_bhs
->iscsi_hdr
.
3664 AMAP_SET_BITS(struct amap_iscsi_wrb
, r2t_exp_dtl
, pwrb
, xferlen
);
3665 AMAP_SET_BITS(struct amap_iscsi_wrb
, wrb_idx
, pwrb
,
3666 io_task
->pwrb_handle
->wrb_index
);
3667 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
,
3668 be32_to_cpu(task
->cmdsn
));
3669 AMAP_SET_BITS(struct amap_iscsi_wrb
, sgl_icd_idx
, pwrb
,
3670 io_task
->psgl_handle
->sgl_index
);
3672 hwi_write_sgl(pwrb
, sg
, num_sg
, io_task
);
3674 AMAP_SET_BITS(struct amap_iscsi_wrb
, ptr2nextwrb
, pwrb
,
3675 io_task
->pwrb_handle
->nxt_wrb_index
);
3676 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_wrb
));
3678 doorbell
|= beiscsi_conn
->beiscsi_conn_cid
& DB_WRB_POST_CID_MASK
;
3679 doorbell
|= (io_task
->pwrb_handle
->wrb_index
&
3680 DB_DEF_PDU_WRB_INDEX_MASK
) << DB_DEF_PDU_WRB_INDEX_SHIFT
;
3681 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
3683 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
3687 static int beiscsi_mtask(struct iscsi_task
*task
)
3689 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3690 struct iscsi_conn
*conn
= task
->conn
;
3691 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3692 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3693 struct iscsi_wrb
*pwrb
= NULL
;
3694 unsigned int doorbell
= 0;
3697 cid
= beiscsi_conn
->beiscsi_conn_cid
;
3698 pwrb
= io_task
->pwrb_handle
->pwrb
;
3699 memset(pwrb
, 0, sizeof(*pwrb
));
3700 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
,
3701 be32_to_cpu(task
->cmdsn
));
3702 AMAP_SET_BITS(struct amap_iscsi_wrb
, wrb_idx
, pwrb
,
3703 io_task
->pwrb_handle
->wrb_index
);
3704 AMAP_SET_BITS(struct amap_iscsi_wrb
, sgl_icd_idx
, pwrb
,
3705 io_task
->psgl_handle
->sgl_index
);
3707 switch (task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) {
3708 case ISCSI_OP_LOGIN
:
3709 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3711 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
3712 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
, 1);
3713 hwi_write_buffer(pwrb
, task
);
3715 case ISCSI_OP_NOOP_OUT
:
3716 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3718 if (task
->hdr
->ttt
== ISCSI_RESERVED_TAG
)
3719 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
3721 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 1);
3722 hwi_write_buffer(pwrb
, task
);
3725 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3727 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
3728 hwi_write_buffer(pwrb
, task
);
3730 case ISCSI_OP_SCSI_TMFUNC
:
3731 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3733 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
3734 hwi_write_buffer(pwrb
, task
);
3736 case ISCSI_OP_LOGOUT
:
3737 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
3738 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3740 hwi_write_buffer(pwrb
, task
);
3744 SE_DEBUG(DBG_LVL_1
, "opcode =%d Not supported\n",
3745 task
->hdr
->opcode
& ISCSI_OPCODE_MASK
);
3749 AMAP_SET_BITS(struct amap_iscsi_wrb
, r2t_exp_dtl
, pwrb
,
3751 AMAP_SET_BITS(struct amap_iscsi_wrb
, ptr2nextwrb
, pwrb
,
3752 io_task
->pwrb_handle
->nxt_wrb_index
);
3753 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_wrb
));
3755 doorbell
|= cid
& DB_WRB_POST_CID_MASK
;
3756 doorbell
|= (io_task
->pwrb_handle
->wrb_index
&
3757 DB_DEF_PDU_WRB_INDEX_MASK
) << DB_DEF_PDU_WRB_INDEX_SHIFT
;
3758 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
3759 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
3763 static int beiscsi_task_xmit(struct iscsi_task
*task
)
3765 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3766 struct scsi_cmnd
*sc
= task
->sc
;
3767 struct scatterlist
*sg
;
3769 unsigned int writedir
= 0, xferlen
= 0;
3772 return beiscsi_mtask(task
);
3774 io_task
->scsi_cmnd
= sc
;
3775 num_sg
= scsi_dma_map(sc
);
3777 SE_DEBUG(DBG_LVL_1
, " scsi_dma_map Failed\n")
3780 xferlen
= scsi_bufflen(sc
);
3781 sg
= scsi_sglist(sc
);
3782 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
3784 SE_DEBUG(DBG_LVL_4
, "task->imm_count=0x%08x\n",
3788 return beiscsi_iotask(task
, sg
, num_sg
, xferlen
, writedir
);
3791 static void beiscsi_remove(struct pci_dev
*pcidev
)
3793 struct beiscsi_hba
*phba
= NULL
;
3794 struct hwi_controller
*phwi_ctrlr
;
3795 struct hwi_context_memory
*phwi_context
;
3796 struct be_eq_obj
*pbe_eq
;
3797 unsigned int i
, msix_vec
;
3798 u8
*real_offset
= 0;
3801 phba
= (struct beiscsi_hba
*)pci_get_drvdata(pcidev
);
3803 dev_err(&pcidev
->dev
, "beiscsi_remove called with no phba\n");
3807 phwi_ctrlr
= phba
->phwi_ctrlr
;
3808 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3809 hwi_disable_intr(phba
);
3810 if (phba
->msix_enabled
) {
3811 for (i
= 0; i
<= phba
->num_cpus
; i
++) {
3812 msix_vec
= phba
->msix_entries
[i
].vector
;
3813 free_irq(msix_vec
, &phwi_context
->be_eq
[i
]);
3816 if (phba
->pcidev
->irq
)
3817 free_irq(phba
->pcidev
->irq
, phba
);
3818 pci_disable_msix(phba
->pcidev
);
3819 destroy_workqueue(phba
->wq
);
3820 if (blk_iopoll_enabled
)
3821 for (i
= 0; i
< phba
->num_cpus
; i
++) {
3822 pbe_eq
= &phwi_context
->be_eq
[i
];
3823 blk_iopoll_disable(&pbe_eq
->iopoll
);
3826 beiscsi_clean_port(phba
);
3827 beiscsi_free_mem(phba
);
3828 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
3830 value
= readl((void *)real_offset
);
3832 if (value
& 0x00010000) {
3833 value
&= 0xfffeffff;
3834 writel(value
, (void *)real_offset
);
3836 beiscsi_unmap_pci_function(phba
);
3837 pci_free_consistent(phba
->pcidev
,
3838 phba
->ctrl
.mbox_mem_alloced
.size
,
3839 phba
->ctrl
.mbox_mem_alloced
.va
,
3840 phba
->ctrl
.mbox_mem_alloced
.dma
);
3841 iscsi_host_remove(phba
->shost
);
3842 pci_dev_put(phba
->pcidev
);
3843 iscsi_host_free(phba
->shost
);
3846 static void beiscsi_msix_enable(struct beiscsi_hba
*phba
)
3850 for (i
= 0; i
<= phba
->num_cpus
; i
++)
3851 phba
->msix_entries
[i
].entry
= i
;
3853 status
= pci_enable_msix(phba
->pcidev
, phba
->msix_entries
,
3854 (phba
->num_cpus
+ 1));
3856 phba
->msix_enabled
= true;
3861 static int __devinit
beiscsi_dev_probe(struct pci_dev
*pcidev
,
3862 const struct pci_device_id
*id
)
3864 struct beiscsi_hba
*phba
= NULL
;
3865 struct hwi_controller
*phwi_ctrlr
;
3866 struct hwi_context_memory
*phwi_context
;
3867 struct be_eq_obj
*pbe_eq
;
3868 int ret
, num_cpus
, i
;
3869 u8
*real_offset
= 0;
3872 ret
= beiscsi_enable_pci(pcidev
);
3874 dev_err(&pcidev
->dev
, "beiscsi_dev_probe-"
3875 " Failed to enable pci device\n");
3879 phba
= beiscsi_hba_alloc(pcidev
);
3881 dev_err(&pcidev
->dev
, "beiscsi_dev_probe-"
3882 " Failed in beiscsi_hba_alloc\n");
3886 switch (pcidev
->device
) {
3890 phba
->generation
= BE_GEN2
;
3894 phba
->generation
= BE_GEN3
;
3897 phba
->generation
= 0;
3901 num_cpus
= find_num_cpus();
3904 phba
->num_cpus
= num_cpus
;
3905 SE_DEBUG(DBG_LVL_8
, "num_cpus = %d\n", phba
->num_cpus
);
3908 beiscsi_msix_enable(phba
);
3909 ret
= be_ctrl_init(phba
, pcidev
);
3911 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
3912 "Failed in be_ctrl_init\n");
3917 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
3918 value
= readl((void *)real_offset
);
3919 if (value
& 0x00010000) {
3921 shost_printk(KERN_ERR
, phba
->shost
,
3922 "Loading Driver in crashdump mode\n");
3923 ret
= beiscsi_pci_soft_reset(phba
);
3925 shost_printk(KERN_ERR
, phba
->shost
,
3926 "Reset Failed. Aborting Crashdump\n");
3929 ret
= be_chk_reset_complete(phba
);
3931 shost_printk(KERN_ERR
, phba
->shost
,
3932 "Failed to get out of reset."
3933 "Aborting Crashdump\n");
3937 value
|= 0x00010000;
3938 writel(value
, (void *)real_offset
);
3943 spin_lock_init(&phba
->io_sgl_lock
);
3944 spin_lock_init(&phba
->mgmt_sgl_lock
);
3945 spin_lock_init(&phba
->isr_lock
);
3946 ret
= mgmt_get_fw_config(&phba
->ctrl
, phba
);
3948 shost_printk(KERN_ERR
, phba
->shost
,
3949 "Error getting fw config\n");
3952 phba
->shost
->max_id
= phba
->fw_config
.iscsi_cid_count
;
3953 beiscsi_get_params(phba
);
3954 phba
->shost
->can_queue
= phba
->params
.ios_per_ctrl
;
3955 ret
= beiscsi_init_port(phba
);
3957 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
3958 "Failed in beiscsi_init_port\n");
3962 for (i
= 0; i
< MAX_MCC_CMD
; i
++) {
3963 init_waitqueue_head(&phba
->ctrl
.mcc_wait
[i
+ 1]);
3964 phba
->ctrl
.mcc_tag
[i
] = i
+ 1;
3965 phba
->ctrl
.mcc_numtag
[i
+ 1] = 0;
3966 phba
->ctrl
.mcc_tag_available
++;
3969 phba
->ctrl
.mcc_alloc_index
= phba
->ctrl
.mcc_free_index
= 0;
3971 snprintf(phba
->wq_name
, sizeof(phba
->wq_name
), "beiscsi_q_irq%u",
3972 phba
->shost
->host_no
);
3973 phba
->wq
= create_workqueue(phba
->wq_name
);
3975 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
3976 "Failed to allocate work queue\n");
3980 INIT_WORK(&phba
->work_cqs
, beiscsi_process_all_cqs
);
3982 phwi_ctrlr
= phba
->phwi_ctrlr
;
3983 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3984 if (blk_iopoll_enabled
) {
3985 for (i
= 0; i
< phba
->num_cpus
; i
++) {
3986 pbe_eq
= &phwi_context
->be_eq
[i
];
3987 blk_iopoll_init(&pbe_eq
->iopoll
, be_iopoll_budget
,
3989 blk_iopoll_enable(&pbe_eq
->iopoll
);
3992 ret
= beiscsi_init_irqs(phba
);
3994 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
3995 "Failed to beiscsi_init_irqs\n");
3998 hwi_enable_intr(phba
);
3999 SE_DEBUG(DBG_LVL_8
, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4003 destroy_workqueue(phba
->wq
);
4004 if (blk_iopoll_enabled
)
4005 for (i
= 0; i
< phba
->num_cpus
; i
++) {
4006 pbe_eq
= &phwi_context
->be_eq
[i
];
4007 blk_iopoll_disable(&pbe_eq
->iopoll
);
4010 beiscsi_clean_port(phba
);
4011 beiscsi_free_mem(phba
);
4013 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
4015 value
= readl((void *)real_offset
);
4017 if (value
& 0x00010000) {
4018 value
&= 0xfffeffff;
4019 writel(value
, (void *)real_offset
);
4022 pci_free_consistent(phba
->pcidev
,
4023 phba
->ctrl
.mbox_mem_alloced
.size
,
4024 phba
->ctrl
.mbox_mem_alloced
.va
,
4025 phba
->ctrl
.mbox_mem_alloced
.dma
);
4026 beiscsi_unmap_pci_function(phba
);
4028 if (phba
->msix_enabled
)
4029 pci_disable_msix(phba
->pcidev
);
4030 iscsi_host_remove(phba
->shost
);
4031 pci_dev_put(phba
->pcidev
);
4032 iscsi_host_free(phba
->shost
);
4034 pci_disable_device(pcidev
);
4038 struct iscsi_transport beiscsi_iscsi_transport
= {
4039 .owner
= THIS_MODULE
,
4041 .caps
= CAP_RECOVERY_L0
| CAP_HDRDGST
| CAP_TEXT_NEGO
|
4042 CAP_MULTI_R2T
| CAP_DATADGST
| CAP_DATA_PATH_OFFLOAD
,
4043 .param_mask
= ISCSI_MAX_RECV_DLENGTH
|
4044 ISCSI_MAX_XMIT_DLENGTH
|
4047 ISCSI_INITIAL_R2T_EN
|
4052 ISCSI_PDU_INORDER_EN
|
4053 ISCSI_DATASEQ_INORDER_EN
|
4056 ISCSI_CONN_ADDRESS
|
4058 ISCSI_PERSISTENT_PORT
|
4059 ISCSI_PERSISTENT_ADDRESS
|
4060 ISCSI_TARGET_NAME
| ISCSI_TPGT
|
4061 ISCSI_USERNAME
| ISCSI_PASSWORD
|
4062 ISCSI_USERNAME_IN
| ISCSI_PASSWORD_IN
|
4063 ISCSI_FAST_ABORT
| ISCSI_ABORT_TMO
|
4064 ISCSI_LU_RESET_TMO
|
4065 ISCSI_PING_TMO
| ISCSI_RECV_TMO
|
4066 ISCSI_IFACE_NAME
| ISCSI_INITIATOR_NAME
,
4067 .host_param_mask
= ISCSI_HOST_HWADDRESS
| ISCSI_HOST_IPADDRESS
|
4068 ISCSI_HOST_INITIATOR_NAME
,
4069 .create_session
= beiscsi_session_create
,
4070 .destroy_session
= beiscsi_session_destroy
,
4071 .create_conn
= beiscsi_conn_create
,
4072 .bind_conn
= beiscsi_conn_bind
,
4073 .destroy_conn
= iscsi_conn_teardown
,
4074 .set_param
= beiscsi_set_param
,
4075 .get_conn_param
= beiscsi_conn_get_param
,
4076 .get_session_param
= iscsi_session_get_param
,
4077 .get_host_param
= beiscsi_get_host_param
,
4078 .start_conn
= beiscsi_conn_start
,
4079 .stop_conn
= iscsi_conn_stop
,
4080 .send_pdu
= iscsi_conn_send_pdu
,
4081 .xmit_task
= beiscsi_task_xmit
,
4082 .cleanup_task
= beiscsi_cleanup_task
,
4083 .alloc_pdu
= beiscsi_alloc_pdu
,
4084 .parse_pdu_itt
= beiscsi_parse_pdu
,
4085 .get_stats
= beiscsi_conn_get_stats
,
4086 .ep_connect
= beiscsi_ep_connect
,
4087 .ep_poll
= beiscsi_ep_poll
,
4088 .ep_disconnect
= beiscsi_ep_disconnect
,
4089 .session_recovery_timedout
= iscsi_session_recovery_timedout
,
4092 static struct pci_driver beiscsi_pci_driver
= {
4094 .probe
= beiscsi_dev_probe
,
4095 .remove
= beiscsi_remove
,
4096 .id_table
= beiscsi_pci_id_table
4100 static int __init
beiscsi_module_init(void)
4104 beiscsi_scsi_transport
=
4105 iscsi_register_transport(&beiscsi_iscsi_transport
);
4106 if (!beiscsi_scsi_transport
) {
4108 "beiscsi_module_init - Unable to register beiscsi"
4112 SE_DEBUG(DBG_LVL_8
, "In beiscsi_module_init, tt=%p\n",
4113 &beiscsi_iscsi_transport
);
4115 ret
= pci_register_driver(&beiscsi_pci_driver
);
4118 "beiscsi_module_init - Unable to register"
4119 "beiscsi pci driver.\n");
4120 goto unregister_iscsi_transport
;
4124 unregister_iscsi_transport
:
4125 iscsi_unregister_transport(&beiscsi_iscsi_transport
);
4129 static void __exit
beiscsi_module_exit(void)
4131 pci_unregister_driver(&beiscsi_pci_driver
);
4132 iscsi_unregister_transport(&beiscsi_iscsi_transport
);
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);