1 | /* |
2 | * Linux driver for VMware's para-virtualized SCSI HBA. | |
3 | * | |
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License as published by the | |
8 | * Free Software Foundation; version 2 of the License and no later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
13 | * NON INFRINGEMENT. See the GNU General Public License for more | |
14 | * details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 | * | |
20 | * Maintained by: Alok N Kataria <akataria@vmware.com> | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/kernel.h> | |
25 | #include <linux/module.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/workqueue.h> | |
28 | #include <linux/pci.h> | |
29 | ||
30 | #include <scsi/scsi.h> | |
31 | #include <scsi/scsi_host.h> | |
32 | #include <scsi/scsi_cmnd.h> | |
33 | #include <scsi/scsi_device.h> | |
34 | ||
35 | #include "vmw_pvscsi.h" | |
36 | ||
37 | #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" | |
38 | ||
39 | MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); | |
40 | MODULE_AUTHOR("VMware, Inc."); | |
41 | MODULE_LICENSE("GPL"); | |
42 | MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); | |
43 | ||
44 | #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 | |
45 | #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 | |
46 | #define PVSCSI_DEFAULT_QUEUE_DEPTH 64 | |
47 | #define SGL_SIZE PAGE_SIZE | |
48 | ||
49 | struct pvscsi_sg_list { | |
50 | struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; | |
51 | }; | |
52 | ||
53 | struct pvscsi_ctx { | |
54 | /* | |
55 | * The index of the context in cmd_map serves as the context ID for a | |
56 | * 1-to-1 mapping of completions back to requests. | |
57 | */ | |
58 | struct scsi_cmnd *cmd; | |
59 | struct pvscsi_sg_list *sgl; | |
60 | struct list_head list; | |
61 | dma_addr_t dataPA; | |
62 | dma_addr_t sensePA; | |
63 | dma_addr_t sglPA; | |
64 | }; | |
65 | ||
66 | struct pvscsi_adapter { | |
67 | char *mmioBase; | |
68 | unsigned int irq; | |
69 | u8 rev; | |
70 | bool use_msi; | |
71 | bool use_msix; | |
72 | bool use_msg; | |
73 | ||
74 | spinlock_t hw_lock; | |
75 | ||
76 | struct workqueue_struct *workqueue; | |
77 | struct work_struct work; | |
78 | ||
79 | struct PVSCSIRingReqDesc *req_ring; | |
80 | unsigned req_pages; | |
81 | unsigned req_depth; | |
82 | dma_addr_t reqRingPA; | |
83 | ||
84 | struct PVSCSIRingCmpDesc *cmp_ring; | |
85 | unsigned cmp_pages; | |
86 | dma_addr_t cmpRingPA; | |
87 | ||
88 | struct PVSCSIRingMsgDesc *msg_ring; | |
89 | unsigned msg_pages; | |
90 | dma_addr_t msgRingPA; | |
91 | ||
92 | struct PVSCSIRingsState *rings_state; | |
93 | dma_addr_t ringStatePA; | |
94 | ||
95 | struct pci_dev *dev; | |
96 | struct Scsi_Host *host; | |
97 | ||
98 | struct list_head cmd_pool; | |
99 | struct pvscsi_ctx *cmd_map; | |
100 | }; | |
101 | ||
102 | ||
103 | /* Command line parameters */ | |
104 | static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; | |
105 | static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; | |
106 | static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; | |
107 | static bool pvscsi_disable_msi; | |
108 | static bool pvscsi_disable_msix; | |
109 | static bool pvscsi_use_msg = true; | |
110 | ||
111 | #define PVSCSI_RW (S_IRUSR | S_IWUSR) | |
112 | ||
113 | module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); | |
114 | MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" | |
115 | __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); | |
116 | ||
117 | module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); | |
118 | MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" | |
119 | __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); | |
120 | ||
121 | module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); | |
122 | MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" | |
123 | __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")"); | |
124 | ||
125 | module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); | |
126 | MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); | |
127 | ||
128 | module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); | |
129 | MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); | |
130 | ||
131 | module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); | |
132 | MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); | |
133 | ||
134 | static const struct pci_device_id pvscsi_pci_tbl[] = { | |
135 | { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, | |
136 | { 0 } | |
137 | }; | |
138 | ||
139 | MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); | |
140 | ||
141 | static struct device * | |
142 | pvscsi_dev(const struct pvscsi_adapter *adapter) | |
143 | { | |
144 | return &(adapter->dev->dev); | |
145 | } | |
146 | ||
147 | static struct pvscsi_ctx * | |
148 | pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) | |
149 | { | |
150 | struct pvscsi_ctx *ctx, *end; | |
151 | ||
152 | end = &adapter->cmd_map[adapter->req_depth]; | |
153 | for (ctx = adapter->cmd_map; ctx < end; ctx++) | |
154 | if (ctx->cmd == cmd) | |
155 | return ctx; | |
156 | ||
157 | return NULL; | |
158 | } | |
159 | ||
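/*
 * Grab a free request context from the adapter's pool and bind it to the
 * command.  Returns NULL when the pool is empty, i.e. when every slot on
 * the request ring is already in use.
 */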
160 | static struct pvscsi_ctx * | |
161 | pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) | |
162 | { | |
163 | struct pvscsi_ctx *ctx; | |
164 | ||
165 | if (list_empty(&adapter->cmd_pool)) | |
166 | return NULL; | |
167 | ||
168 | ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); | |
169 | ctx->cmd = cmd; | |
170 | list_del(&ctx->list); | |
171 | ||
172 | return ctx; | |
173 | } | |
174 | ||
175 | static void pvscsi_release_context(struct pvscsi_adapter *adapter, | |
176 | struct pvscsi_ctx *ctx) | |
177 | { | |
178 | ctx->cmd = NULL; | |
179 | list_add(&ctx->list, &adapter->cmd_pool); | |
180 | } | |
181 | ||
182 | /* | |
183 | * Map a pvscsi_ctx struct to a context ID field value; we map to a simple | |
184 | * non-zero integer. ctx always points to an entry in the cmd_map array, hence | |
185 | * the return value is always >=1. | |
186 | */ | |
187 | static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, | |
188 | const struct pvscsi_ctx *ctx) | |
189 | { | |
190 | return ctx - adapter->cmd_map + 1; | |
191 | } | |
192 | ||
193 | static struct pvscsi_ctx * | |
194 | pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) | |
195 | { | |
196 | return &adapter->cmd_map[context - 1]; | |
197 | } | |
198 | ||
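/* Accessors for the adapter's memory-mapped (MMIO) register space. */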
199 | static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, | |
200 | u32 offset, u32 val) | |
201 | { | |
202 | writel(val, adapter->mmioBase + offset); | |
203 | } | |
204 | ||
205 | static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) | |
206 | { | |
207 | return readl(adapter->mmioBase + offset); | |
208 | } | |
209 | ||
210 | static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) | |
211 | { | |
212 | return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); | |
213 | } | |
214 | ||
215 | static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, | |
216 | u32 val) | |
217 | { | |
218 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); | |
219 | } | |
220 | ||
221 | static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) | |
222 | { | |
223 | u32 intr_bits; | |
224 | ||
225 | intr_bits = PVSCSI_INTR_CMPL_MASK; | |
226 | if (adapter->use_msg) | |
227 | intr_bits |= PVSCSI_INTR_MSG_MASK; | |
228 | ||
229 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); | |
230 | } | |
231 | ||
232 | static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) | |
233 | { | |
234 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); | |
235 | } | |
236 | ||
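/*
 * Issue a command to the device: write the command code to the COMMAND
 * register, then feed the descriptor to the COMMAND_DATA register one
 * 32-bit word at a time.
 */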
237 | static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, | |
238 | u32 cmd, const void *desc, size_t len) | |
239 | { | |
240 | const u32 *ptr = desc; | |
241 | size_t i; | |
242 | ||
243 | len /= sizeof(*ptr); | |
244 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); | |
245 | for (i = 0; i < len; i++) | |
246 | pvscsi_reg_write(adapter, | |
247 | PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); | |
248 | } | |
249 | ||
250 | static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, | |
251 | const struct pvscsi_ctx *ctx) | |
252 | { | |
253 | struct PVSCSICmdDescAbortCmd cmd = { 0 }; | |
254 | ||
255 | cmd.target = ctx->cmd->device->id; | |
256 | cmd.context = pvscsi_map_context(adapter, ctx); | |
257 | ||
258 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); | |
259 | } | |
260 | ||
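/*
 * Doorbell writes that tell the device new work has been posted on the
 * request ring: KICK_RW_IO for read/write commands, KICK_NON_RW_IO for
 * everything else and whenever the request ring must be processed
 * immediately (e.g. around resets).
 */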
261 | static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) | |
262 | { | |
263 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); | |
264 | } | |
265 | ||
266 | static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) | |
267 | { | |
268 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); | |
269 | } | |
270 | ||
271 | static int scsi_is_rw(unsigned char op) | |
272 | { | |
273 | return op == READ_6 || op == WRITE_6 || | |
274 | op == READ_10 || op == WRITE_10 || | |
275 | op == READ_12 || op == WRITE_12 || | |
276 | op == READ_16 || op == WRITE_16; | |
277 | } | |
278 | ||
279 | static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, | |
280 | unsigned char op) | |
281 | { | |
282 | if (scsi_is_rw(op)) | |
283 | pvscsi_kick_rw_io(adapter); | |
284 | else | |
285 | pvscsi_process_request_ring(adapter); | |
286 | } | |
287 | ||
288 | static void ll_adapter_reset(const struct pvscsi_adapter *adapter) | |
289 | { | |
290 | dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); | |
291 | ||
292 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); | |
293 | } | |
294 | ||
295 | static void ll_bus_reset(const struct pvscsi_adapter *adapter) | |
296 | { | |
297 | dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter); | |
298 | ||
299 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); | |
300 | } | |
301 | ||
302 | static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) | |
303 | { | |
304 | struct PVSCSICmdDescResetDevice cmd = { 0 }; | |
305 | ||
306 | dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target); | |
307 | ||
308 | cmd.target = target; | |
309 | ||
310 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, | |
311 | &cmd, sizeof(cmd)); | |
312 | } | |
313 | ||
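/*
 * Copy the DMA address and length of each mapped scatterlist entry into
 * the context's PVSCSI SG element array.
 */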
314 | static void pvscsi_create_sg(struct pvscsi_ctx *ctx, | |
315 | struct scatterlist *sg, unsigned count) | |
316 | { | |
317 | unsigned i; | |
318 | struct PVSCSISGElement *sge; | |
319 | ||
320 | BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); | |
321 | ||
322 | sge = &ctx->sgl->sge[0]; | |
323 | for (i = 0; i < count; i++, sg++) { | |
324 | sge[i].addr = sg_dma_address(sg); | |
325 | sge[i].length = sg_dma_len(sg); | |
326 | sge[i].flags = 0; | |
327 | } | |
328 | } | |
329 | ||
330 | /* | |
331 | * Map all data buffers for a command into PCI space and | |
332 | * set up the scatter/gather list if needed. | |
333 | */ | |
334 | static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, | |
335 | struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, | |
336 | struct PVSCSIRingReqDesc *e) | |
337 | { | |
338 | unsigned count; | |
339 | unsigned bufflen = scsi_bufflen(cmd); | |
340 | struct scatterlist *sg; | |
341 | ||
342 | e->dataLen = bufflen; | |
343 | e->dataAddr = 0; | |
344 | if (bufflen == 0) | |
345 | return; | |
346 | ||
347 | sg = scsi_sglist(cmd); | |
348 | count = scsi_sg_count(cmd); | |
349 | if (count != 0) { | |
350 | int segs = scsi_dma_map(cmd); | |
351 | if (segs > 1) { | |
352 | pvscsi_create_sg(ctx, sg, segs); | |
353 | ||
354 | e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; | |
355 | ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, | |
356 | SGL_SIZE, PCI_DMA_TODEVICE); | |
357 | e->dataAddr = ctx->sglPA; | |
358 | } else | |
359 | e->dataAddr = sg_dma_address(sg); | |
360 | } else { | |
361 | /* | |
362 | * In case there is no S/G list, scsi_sglist points | |
363 | * directly to the buffer. | |
364 | */ | |
365 | ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, | |
366 | cmd->sc_data_direction); | |
367 | e->dataAddr = ctx->dataPA; | |
368 | } | |
369 | } | |
370 | ||
371 | static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, | |
372 | struct pvscsi_ctx *ctx) | |
373 | { | |
374 | struct scsi_cmnd *cmd; | |
375 | unsigned bufflen; | |
376 | ||
377 | cmd = ctx->cmd; | |
378 | bufflen = scsi_bufflen(cmd); | |
379 | ||
380 | if (bufflen != 0) { | |
381 | unsigned count = scsi_sg_count(cmd); | |
382 | ||
383 | if (count != 0) { | |
384 | scsi_dma_unmap(cmd); | |
385 | if (ctx->sglPA) { | |
386 | pci_unmap_single(adapter->dev, ctx->sglPA, | |
387 | SGL_SIZE, PCI_DMA_TODEVICE); | |
388 | ctx->sglPA = 0; | |
389 | } | |
390 | } else | |
391 | pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, | |
392 | cmd->sc_data_direction); | |
393 | } | |
394 | if (cmd->sense_buffer) | |
395 | pci_unmap_single(adapter->dev, ctx->sensePA, | |
396 | SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); | |
397 | } | |
398 | ||
399 | static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter) | |
400 | { | |
401 | adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, | |
402 | &adapter->ringStatePA); | |
403 | if (!adapter->rings_state) | |
404 | return -ENOMEM; | |
405 | ||
406 | adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, | |
407 | pvscsi_ring_pages); | |
408 | adapter->req_depth = adapter->req_pages | |
409 | * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; | |
410 | adapter->req_ring = pci_alloc_consistent(adapter->dev, | |
411 | adapter->req_pages * PAGE_SIZE, | |
412 | &adapter->reqRingPA); | |
413 | if (!adapter->req_ring) | |
414 | return -ENOMEM; | |
415 | ||
416 | adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, | |
417 | pvscsi_ring_pages); | |
418 | adapter->cmp_ring = pci_alloc_consistent(adapter->dev, | |
419 | adapter->cmp_pages * PAGE_SIZE, | |
420 | &adapter->cmpRingPA); | |
421 | if (!adapter->cmp_ring) | |
422 | return -ENOMEM; | |
423 | ||
424 | BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); | |
425 | BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); | |
426 | BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); | |
427 | ||
428 | if (!adapter->use_msg) | |
429 | return 0; | |
430 | ||
431 | adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, | |
432 | pvscsi_msg_ring_pages); | |
433 | adapter->msg_ring = pci_alloc_consistent(adapter->dev, | |
434 | adapter->msg_pages * PAGE_SIZE, | |
435 | &adapter->msgRingPA); | |
436 | if (!adapter->msg_ring) | |
437 | return -ENOMEM; | |
438 | BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); | |
439 | ||
440 | return 0; | |
441 | } | |
442 | ||
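/*
 * Hand the ring memory over to the device: pass the page numbers of the
 * rings state page and of every request/completion (and, optionally,
 * message) ring page via the SETUP_RINGS and SETUP_MSG_RING commands.
 */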
443 | static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) | |
444 | { | |
445 | struct PVSCSICmdDescSetupRings cmd = { 0 }; | |
446 | dma_addr_t base; | |
447 | unsigned i; | |
448 | ||
449 | cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; | |
450 | cmd.reqRingNumPages = adapter->req_pages; | |
451 | cmd.cmpRingNumPages = adapter->cmp_pages; | |
452 | ||
453 | base = adapter->reqRingPA; | |
454 | for (i = 0; i < adapter->req_pages; i++) { | |
455 | cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; | |
456 | base += PAGE_SIZE; | |
457 | } | |
458 | ||
459 | base = adapter->cmpRingPA; | |
460 | for (i = 0; i < adapter->cmp_pages; i++) { | |
461 | cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; | |
462 | base += PAGE_SIZE; | |
463 | } | |
464 | ||
465 | memset(adapter->rings_state, 0, PAGE_SIZE); | |
466 | memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); | |
467 | memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); | |
468 | ||
469 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, | |
470 | &cmd, sizeof(cmd)); | |
471 | ||
472 | if (adapter->use_msg) { | |
473 | struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; | |
474 | ||
475 | cmd_msg.numPages = adapter->msg_pages; | |
476 | ||
477 | base = adapter->msgRingPA; | |
478 | for (i = 0; i < adapter->msg_pages; i++) { | |
479 | cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; | |
480 | base += PAGE_SIZE; | |
481 | } | |
482 | memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); | |
483 | ||
484 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, | |
485 | &cmd_msg, sizeof(cmd_msg)); | |
486 | } | |
487 | } | |
488 | ||
489 | /* | |
490 | * Pull a completion descriptor off and pass the completion back | |
491 | * to the SCSI mid layer. | |
492 | */ | |
493 | static void pvscsi_complete_request(struct pvscsi_adapter *adapter, | |
494 | const struct PVSCSIRingCmpDesc *e) | |
495 | { | |
496 | struct pvscsi_ctx *ctx; | |
497 | struct scsi_cmnd *cmd; | |
498 | u32 btstat = e->hostStatus; | |
499 | u32 sdstat = e->scsiStatus; | |
500 | ||
501 | ctx = pvscsi_get_context(adapter, e->context); | |
502 | cmd = ctx->cmd; | |
503 | pvscsi_unmap_buffers(adapter, ctx); | |
504 | pvscsi_release_context(adapter, ctx); | |
505 | cmd->result = 0; | |
506 | ||
507 | if (sdstat != SAM_STAT_GOOD && | |
508 | (btstat == BTSTAT_SUCCESS || | |
509 | btstat == BTSTAT_LINKED_COMMAND_COMPLETED || | |
510 | btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { | |
511 | cmd->result = (DID_OK << 16) | sdstat; | |
512 | if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) | |
513 | cmd->result |= (DRIVER_SENSE << 24); | |
514 | } else | |
515 | switch (btstat) { | |
516 | case BTSTAT_SUCCESS: | |
517 | case BTSTAT_LINKED_COMMAND_COMPLETED: | |
518 | case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: | |
519 | /* If everything went fine, let's move on.. */ | |
520 | cmd->result = (DID_OK << 16); | |
521 | break; | |
522 | ||
523 | case BTSTAT_DATARUN: | |
524 | case BTSTAT_DATA_UNDERRUN: | |
525 | /* Report residual data in underruns */ | |
526 | scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); | |
527 | cmd->result = (DID_ERROR << 16); | |
528 | break; | |
529 | ||
530 | case BTSTAT_SELTIMEO: | |
531 | /* Our emulation returns this for non-connected devs */ | |
532 | cmd->result = (DID_BAD_TARGET << 16); | |
533 | break; | |
534 | ||
535 | case BTSTAT_LUNMISMATCH: | |
536 | case BTSTAT_TAGREJECT: | |
537 | case BTSTAT_BADMSG: | |
538 | cmd->result = (DRIVER_INVALID << 24); | |
539 | /* fall through */ | |
540 | ||
541 | case BTSTAT_HAHARDWARE: | |
542 | case BTSTAT_INVPHASE: | |
543 | case BTSTAT_HATIMEOUT: | |
544 | case BTSTAT_NORESPONSE: | |
545 | case BTSTAT_DISCONNECT: | |
546 | case BTSTAT_HASOFTWARE: | |
547 | case BTSTAT_BUSFREE: | |
548 | case BTSTAT_SENSFAILED: | |
549 | cmd->result |= (DID_ERROR << 16); | |
550 | break; | |
551 | ||
552 | case BTSTAT_SENTRST: | |
553 | case BTSTAT_RECVRST: | |
554 | case BTSTAT_BUSRESET: | |
555 | cmd->result = (DID_RESET << 16); | |
556 | break; | |
557 | ||
558 | case BTSTAT_ABORTQUEUE: | |
559 | cmd->result = (DID_ABORT << 16); | |
560 | break; | |
561 | ||
562 | case BTSTAT_SCSIPARITY: | |
563 | cmd->result = (DID_PARITY << 16); | |
564 | break; | |
565 | ||
566 | default: | |
567 | cmd->result = (DID_ERROR << 16); | |
568 | scmd_printk(KERN_DEBUG, cmd, | |
569 | "Unknown completion status: 0x%x\n", | |
570 | btstat); | |
571 | } | |
572 | ||
573 | dev_dbg(&cmd->device->sdev_gendev, | |
574 | "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", | |
575 | cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); | |
576 | ||
577 | cmd->scsi_done(cmd); | |
578 | } | |
579 | ||
580 | /* | |
581 | * Barrier usage: since the PVSCSI device is emulated, there could be cases | |
582 | * where we may want to serialize some accesses between the driver and the | |
583 | * emulation layer. We use compiler barriers instead of the more expensive | |
584 | * memory barriers because PVSCSI is only supported on x86, which has strong | |
585 | * memory access ordering. | |
586 | */ | |
587 | static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) | |
588 | { | |
589 | struct PVSCSIRingsState *s = adapter->rings_state; | |
590 | struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; | |
591 | u32 cmp_entries = s->cmpNumEntriesLog2; | |
592 | ||
593 | while (s->cmpConsIdx != s->cmpProdIdx) { | |
594 | struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & | |
595 | MASK(cmp_entries)); | |
596 | /* | |
597 | * This barrier() ensures that *e is not dereferenced while | |
598 | * the device emulation still writes data into the slot. | |
599 | * Since the device emulation advances s->cmpProdIdx only after | |
600 | * updating the slot, we want to check it first. | |
601 | */ | |
602 | barrier(); | |
603 | pvscsi_complete_request(adapter, e); | |
604 | /* | |
605 | * This barrier() ensures that the compiler doesn't reorder the write | |
606 | * to s->cmpConsIdx before the read of (*e) inside | |
607 | * pvscsi_complete_request. Otherwise, the device emulation may | |
608 | * overwrite *e before we have had a chance to read it. | |
609 | */ | |
610 | barrier(); | |
611 | s->cmpConsIdx++; | |
612 | } | |
613 | } | |
614 | ||
615 | /* | |
616 | * Translate a Linux SCSI request into a request ring entry. | |
617 | */ | |
618 | static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | |
619 | struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) | |
620 | { | |
621 | struct PVSCSIRingsState *s; | |
622 | struct PVSCSIRingReqDesc *e; | |
623 | struct scsi_device *sdev; | |
624 | u32 req_entries; | |
625 | ||
626 | s = adapter->rings_state; | |
627 | sdev = cmd->device; | |
628 | req_entries = s->reqNumEntriesLog2; | |
629 | ||
630 | /* | |
631 | * If this condition holds, we might have room on the request ring, but | |
632 | * we might not have room on the completion ring for the response. | |
633 | * However, we have already ruled out this possibility - we would not | |
634 | * have successfully allocated a context if it were true, since we only | |
635 | * have one context per request entry. Check for it anyway, since it | |
636 | * would be a serious bug. | |
637 | */ | |
638 | if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { | |
639 | scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " | |
640 | "ring full: reqProdIdx=%d cmpConsIdx=%d\n", | |
641 | s->reqProdIdx, s->cmpConsIdx); | |
642 | return -1; | |
643 | } | |
644 | ||
645 | e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); | |
646 | ||
647 | e->bus = sdev->channel; | |
648 | e->target = sdev->id; | |
649 | memset(e->lun, 0, sizeof(e->lun)); | |
650 | e->lun[1] = sdev->lun; | |
651 | ||
652 | if (cmd->sense_buffer) { | |
653 | ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, | |
654 | SCSI_SENSE_BUFFERSIZE, | |
655 | PCI_DMA_FROMDEVICE); | |
656 | e->senseAddr = ctx->sensePA; | |
657 | e->senseLen = SCSI_SENSE_BUFFERSIZE; | |
658 | } else { | |
659 | e->senseLen = 0; | |
660 | e->senseAddr = 0; | |
661 | } | |
662 | e->cdbLen = cmd->cmd_len; | |
663 | e->vcpuHint = smp_processor_id(); | |
664 | memcpy(e->cdb, cmd->cmnd, e->cdbLen); | |
665 | ||
666 | e->tag = SIMPLE_QUEUE_TAG; | |
667 | if (sdev->tagged_supported && | |
668 | (cmd->tag == HEAD_OF_QUEUE_TAG || | |
669 | cmd->tag == ORDERED_QUEUE_TAG)) | |
670 | e->tag = cmd->tag; | |
671 | ||
672 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) | |
673 | e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; | |
674 | else if (cmd->sc_data_direction == DMA_TO_DEVICE) | |
675 | e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; | |
676 | else if (cmd->sc_data_direction == DMA_NONE) | |
677 | e->flags = PVSCSI_FLAG_CMD_DIR_NONE; | |
678 | else | |
679 | e->flags = 0; | |
680 | ||
681 | pvscsi_map_buffers(adapter, ctx, cmd, e); | |
682 | ||
683 | e->context = pvscsi_map_context(adapter, ctx); | |
684 | ||
685 | barrier(); | |
686 | ||
687 | s->reqProdIdx++; | |
688 | ||
689 | return 0; | |
690 | } | |
691 | ||
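/*
 * queuecommand entry point: reserve a context, post the request on the
 * request ring and ring the doorbell.  Returns SCSI_MLQUEUE_HOST_BUSY if
 * no context is available or the ring is full.
 */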
692 | static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |
693 | { | |
694 | struct Scsi_Host *host = cmd->device->host; | |
695 | struct pvscsi_adapter *adapter = shost_priv(host); | |
696 | struct pvscsi_ctx *ctx; | |
697 | unsigned long flags; | |
698 | ||
699 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
700 | ||
701 | ctx = pvscsi_acquire_context(adapter, cmd); | |
702 | if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { | |
703 | if (ctx) | |
704 | pvscsi_release_context(adapter, ctx); | |
705 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
706 | return SCSI_MLQUEUE_HOST_BUSY; | |
707 | } | |
708 | ||
709 | cmd->scsi_done = done; | |
710 | ||
711 | dev_dbg(&cmd->device->sdev_gendev, | |
712 | "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); | |
713 | ||
714 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
715 | ||
716 | pvscsi_kick_io(adapter, cmd->cmnd[0]); | |
717 | ||
718 | return 0; | |
719 | } | |
720 | ||
721 | static int pvscsi_abort(struct scsi_cmnd *cmd) | |
722 | { | |
723 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); | |
724 | struct pvscsi_ctx *ctx; | |
725 | unsigned long flags; | |
726 | ||
727 | scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", | |
728 | adapter->host->host_no, cmd); | |
729 | ||
730 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
731 | ||
732 | /* | |
733 | * Poll the completion ring first - we might be trying to abort | |
734 | * a command that is waiting to be dispatched in the completion ring. | |
735 | */ | |
736 | pvscsi_process_completion_ring(adapter); | |
737 | ||
738 | /* | |
739 | * If there is no context for the command, it either already succeeded | |
740 | * or else was never properly issued. Not our problem. | |
741 | */ | |
742 | ctx = pvscsi_find_context(adapter, cmd); | |
743 | if (!ctx) { | |
744 | scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); | |
745 | goto out; | |
746 | } | |
747 | ||
748 | pvscsi_abort_cmd(adapter, ctx); | |
749 | ||
750 | pvscsi_process_completion_ring(adapter); | |
751 | ||
752 | out: | |
753 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
754 | return SUCCESS; | |
755 | } | |
756 | ||
757 | /* | |
758 | * Abort all outstanding requests. This is only safe to use if the completion | |
759 | * ring will never be walked again or the device has been reset, because it | |
760 | * destroys the 1-1 mapping between the context field passed to the emulation | |
761 | * and our request structure. | |
762 | */ | |
763 | static void pvscsi_reset_all(struct pvscsi_adapter *adapter) | |
764 | { | |
765 | unsigned i; | |
766 | ||
767 | for (i = 0; i < adapter->req_depth; i++) { | |
768 | struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; | |
769 | struct scsi_cmnd *cmd = ctx->cmd; | |
770 | if (cmd) { | |
771 | scmd_printk(KERN_ERR, cmd, | |
772 | "Forced reset on cmd %p\n", cmd); | |
773 | pvscsi_unmap_buffers(adapter, ctx); | |
774 | pvscsi_release_context(adapter, ctx); | |
775 | cmd->result = (DID_RESET << 16); | |
776 | cmd->scsi_done(cmd); | |
777 | } | |
778 | } | |
779 | } | |
780 | ||
781 | static int pvscsi_host_reset(struct scsi_cmnd *cmd) | |
782 | { | |
783 | struct Scsi_Host *host = cmd->device->host; | |
784 | struct pvscsi_adapter *adapter = shost_priv(host); | |
785 | unsigned long flags; | |
786 | bool use_msg; | |
787 | ||
788 | scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n"); | |
789 | ||
790 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
791 | ||
792 | use_msg = adapter->use_msg; | |
793 | ||
794 | if (use_msg) { | |
795 | adapter->use_msg = 0; | |
796 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
797 | ||
798 | /* | |
799 | * Now that we know that the ISR won't add more work to the | |
800 | * workqueue, we can safely flush any outstanding work. | |
801 | */ | |
802 | flush_workqueue(adapter->workqueue); | |
803 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
804 | } | |
805 | ||
806 | /* | |
807 | * We're going to tear down the entire ring structure and set it back | |
808 | * up, so stall new requests until all completions are flushed and | |
809 | * the rings are back in place. | |
810 | */ | |
811 | ||
812 | pvscsi_process_request_ring(adapter); | |
813 | ||
814 | ll_adapter_reset(adapter); | |
815 | ||
816 | /* | |
817 | * Now process any completions. Note we do this AFTER adapter reset, | |
818 | * which is strange, but stops races where completions get posted | |
819 | * between processing the ring and issuing the reset. The backend will | |
820 | * not touch the ring memory after reset, so the immediately pre-reset | |
821 | * completion ring state is still valid. | |
822 | */ | |
823 | pvscsi_process_completion_ring(adapter); | |
824 | ||
825 | pvscsi_reset_all(adapter); | |
826 | adapter->use_msg = use_msg; | |
827 | pvscsi_setup_all_rings(adapter); | |
828 | pvscsi_unmask_intr(adapter); | |
829 | ||
830 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
831 | ||
832 | return SUCCESS; | |
833 | } | |
834 | ||
835 | static int pvscsi_bus_reset(struct scsi_cmnd *cmd) | |
836 | { | |
837 | struct Scsi_Host *host = cmd->device->host; | |
838 | struct pvscsi_adapter *adapter = shost_priv(host); | |
839 | unsigned long flags; | |
840 | ||
841 | scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); | |
842 | ||
843 | /* | |
844 | * We don't want to queue new requests for this bus after | |
845 | * flushing all pending requests to emulation, since new | |
846 | * requests could then sneak in during this bus reset phase, | |
847 | * so take the lock now. | |
848 | */ | |
849 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
850 | ||
851 | pvscsi_process_request_ring(adapter); | |
852 | ll_bus_reset(adapter); | |
853 | pvscsi_process_completion_ring(adapter); | |
854 | ||
855 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
856 | ||
857 | return SUCCESS; | |
858 | } | |
859 | ||
860 | static int pvscsi_device_reset(struct scsi_cmnd *cmd) | |
861 | { | |
862 | struct Scsi_Host *host = cmd->device->host; | |
863 | struct pvscsi_adapter *adapter = shost_priv(host); | |
864 | unsigned long flags; | |
865 | ||
866 | scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", | |
867 | host->host_no, cmd->device->id); | |
868 | ||
869 | /* | |
870 | * We don't want to queue new requests for this device after flushing | |
871 | * all pending requests to emulation, since new requests could then | |
872 | * sneak in during this device reset phase, so take the lock now. | |
873 | */ | |
874 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
875 | ||
876 | pvscsi_process_request_ring(adapter); | |
877 | ll_device_reset(adapter, cmd->device->id); | |
878 | pvscsi_process_completion_ring(adapter); | |
879 | ||
880 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
881 | ||
882 | return SUCCESS; | |
883 | } | |
884 | ||
885 | static struct scsi_host_template pvscsi_template; | |
886 | ||
887 | static const char *pvscsi_info(struct Scsi_Host *host) | |
888 | { | |
889 | struct pvscsi_adapter *adapter = shost_priv(host); | |
890 | static char buf[256]; | |
891 | ||
892 | sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " | |
893 | "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, | |
894 | adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, | |
895 | pvscsi_template.cmd_per_lun); | |
896 | ||
897 | return buf; | |
898 | } | |
899 | ||
900 | static struct scsi_host_template pvscsi_template = { | |
901 | .module = THIS_MODULE, | |
902 | .name = "VMware PVSCSI Host Adapter", | |
903 | .proc_name = "vmw_pvscsi", | |
904 | .info = pvscsi_info, | |
905 | .queuecommand = pvscsi_queue, | |
906 | .this_id = -1, | |
907 | .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, | |
908 | .dma_boundary = UINT_MAX, | |
909 | .max_sectors = 0xffff, | |
910 | .use_clustering = ENABLE_CLUSTERING, | |
911 | .eh_abort_handler = pvscsi_abort, | |
912 | .eh_device_reset_handler = pvscsi_device_reset, | |
913 | .eh_bus_reset_handler = pvscsi_bus_reset, | |
914 | .eh_host_reset_handler = pvscsi_host_reset, | |
915 | }; | |
916 | ||
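/*
 * Handle a single message descriptor: the device reports hot-added and
 * hot-removed targets, for which we add or remove the corresponding
 * scsi_device.
 */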
917 | static void pvscsi_process_msg(const struct pvscsi_adapter *adapter, | |
918 | const struct PVSCSIRingMsgDesc *e) | |
919 | { | |
920 | struct PVSCSIRingsState *s = adapter->rings_state; | |
921 | struct Scsi_Host *host = adapter->host; | |
922 | struct scsi_device *sdev; | |
923 | ||
924 | printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n", | |
925 | e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); | |
926 | ||
927 | BUILD_BUG_ON(PVSCSI_MSG_LAST != 2); | |
928 | ||
929 | if (e->type == PVSCSI_MSG_DEV_ADDED) { | |
930 | struct PVSCSIMsgDescDevStatusChanged *desc; | |
931 | desc = (struct PVSCSIMsgDescDevStatusChanged *)e; | |
932 | ||
933 | printk(KERN_INFO | |
934 | "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n", | |
935 | desc->bus, desc->target, desc->lun[1]); | |
936 | ||
937 | if (!scsi_host_get(host)) | |
938 | return; | |
939 | ||
940 | sdev = scsi_device_lookup(host, desc->bus, desc->target, | |
941 | desc->lun[1]); | |
942 | if (sdev) { | |
943 | printk(KERN_INFO "vmw_pvscsi: device already exists\n"); | |
944 | scsi_device_put(sdev); | |
945 | } else | |
946 | scsi_add_device(adapter->host, desc->bus, | |
947 | desc->target, desc->lun[1]); | |
948 | ||
949 | scsi_host_put(host); | |
950 | } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { | |
951 | struct PVSCSIMsgDescDevStatusChanged *desc; | |
952 | desc = (struct PVSCSIMsgDescDevStatusChanged *)e; | |
953 | ||
954 | printk(KERN_INFO | |
955 | "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n", | |
956 | desc->bus, desc->target, desc->lun[1]); | |
957 | ||
958 | if (!scsi_host_get(host)) | |
959 | return; | |
960 | ||
961 | sdev = scsi_device_lookup(host, desc->bus, desc->target, | |
962 | desc->lun[1]); | |
963 | if (sdev) { | |
964 | scsi_remove_device(sdev); | |
965 | scsi_device_put(sdev); | |
966 | } else | |
967 | printk(KERN_INFO | |
968 | "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n", | |
969 | desc->bus, desc->target, desc->lun[1]); | |
970 | ||
971 | scsi_host_put(host); | |
972 | } | |
973 | } | |
974 | ||
975 | static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) | |
976 | { | |
977 | struct PVSCSIRingsState *s = adapter->rings_state; | |
978 | ||
979 | return s->msgProdIdx != s->msgConsIdx; | |
980 | } | |
981 | ||
982 | static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter) | |
983 | { | |
984 | struct PVSCSIRingsState *s = adapter->rings_state; | |
985 | struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; | |
986 | u32 msg_entries = s->msgNumEntriesLog2; | |
987 | ||
988 | while (pvscsi_msg_pending(adapter)) { | |
989 | struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & | |
990 | MASK(msg_entries)); | |
991 | ||
992 | barrier(); | |
993 | pvscsi_process_msg(adapter, e); | |
994 | barrier(); | |
995 | s->msgConsIdx++; | |
996 | } | |
997 | } | |
998 | ||
999 | static void pvscsi_msg_workqueue_handler(struct work_struct *data) | |
1000 | { | |
1001 | struct pvscsi_adapter *adapter; | |
1002 | ||
1003 | adapter = container_of(data, struct pvscsi_adapter, work); | |
1004 | ||
1005 | pvscsi_process_msg_ring(adapter); | |
1006 | } | |
1007 | ||
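/*
 * Determine whether the device supports the message ring (by probing the
 * SETUP_MSG_RING command) and, if so, create the workqueue used to
 * process it outside interrupt context.  Returns 1 if the msg ring will
 * be used, 0 otherwise.
 */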
1008 | static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) | |
1009 | { | |
1010 | char name[32]; | |
1011 | ||
1012 | if (!pvscsi_use_msg) | |
1013 | return 0; | |
1014 | ||
1015 | pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, | |
1016 | PVSCSI_CMD_SETUP_MSG_RING); | |
1017 | ||
1018 | if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) | |
1019 | return 0; | |
1020 | ||
1021 | snprintf(name, sizeof(name), | |
1022 | "vmw_pvscsi_wq_%u", adapter->host->host_no); | |
1023 | ||
1024 | adapter->workqueue = create_singlethread_workqueue(name); | |
1025 | if (!adapter->workqueue) { | |
1026 | printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); | |
1027 | return 0; | |
1028 | } | |
1029 | INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); | |
1030 | ||
1031 | return 1; | |
1032 | } | |
1033 | ||
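/*
 * Interrupt handler.  For INTx the interrupt status is read and
 * acknowledged first; MSI/MSI-X interrupts are always ours.  Completions
 * are processed here, while message-ring work is deferred to the
 * workqueue.
 */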
1034 | static irqreturn_t pvscsi_isr(int irq, void *devp) | |
1035 | { | |
1036 | struct pvscsi_adapter *adapter = devp; | |
1037 | int handled; | |
1038 | ||
1039 | if (adapter->use_msi || adapter->use_msix) | |
1040 | handled = true; | |
1041 | else { | |
1042 | u32 val = pvscsi_read_intr_status(adapter); | |
1043 | handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; | |
1044 | if (handled) | |
1045 | pvscsi_write_intr_status(devp, val); | |
1046 | } | |
1047 | ||
1048 | if (handled) { | |
1049 | unsigned long flags; | |
1050 | ||
1051 | spin_lock_irqsave(&adapter->hw_lock, flags); | |
1052 | ||
1053 | pvscsi_process_completion_ring(adapter); | |
1054 | if (adapter->use_msg && pvscsi_msg_pending(adapter)) | |
1055 | queue_work(adapter->workqueue, &adapter->work); | |
1056 | ||
1057 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | |
1058 | } | |
1059 | ||
1060 | return IRQ_RETVAL(handled); | |
1061 | } | |
1062 | ||
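/* Free the per-context SG list pages allocated by pvscsi_allocate_sg(). */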
1063 | static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) | |
1064 | { | |
1065 | struct pvscsi_ctx *ctx = adapter->cmd_map; | |
1066 | unsigned i; | |
1067 | ||
1068 | for (i = 0; i < adapter->req_depth; ++i, ++ctx) | |
1069 | free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); | |
1070 | } | |
1071 | ||
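/*
 * Enable a single MSI-X vector for completion interrupts and return the
 * assigned IRQ number via *irq.
 */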
1072 | static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, |
1073 | unsigned int *irq) | |
1074 | { |
1075 | struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; | |
1076 | int ret; | |
1077 | ||
1078 | ret = pci_enable_msix(adapter->dev, &entry, 1); | |
1079 | if (ret) | |
1080 | return ret; | |
1081 | ||
1082 | *irq = entry.vector; | |
1083 | ||
1084 | return 0; | |
1085 | } | |
1086 | ||
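/* Free the IRQ and disable MSI or MSI-X if either was enabled. */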
1087 | static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) | |
1088 | { | |
1089 | if (adapter->irq) { | |
1090 | free_irq(adapter->irq, adapter); | |
1091 | adapter->irq = 0; | |
1092 | } | |
1093 | if (adapter->use_msi) { | |
1094 | pci_disable_msi(adapter->dev); | |
1095 | adapter->use_msi = 0; | |
1096 | } else if (adapter->use_msix) { | |
1097 | pci_disable_msix(adapter->dev); | |
1098 | adapter->use_msix = 0; | |
1099 | } | |
1100 | } | |
1101 | ||
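/*
 * Undo everything pvscsi_probe() set up: interrupts, the msg workqueue,
 * the MMIO mapping, PCI regions, the context map with its SG lists and
 * the DMA-coherent ring allocations.
 */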
1102 | static void pvscsi_release_resources(struct pvscsi_adapter *adapter) | |
1103 | { | |
1104 | pvscsi_shutdown_intr(adapter); | |
1105 | ||
1106 | if (adapter->workqueue) | |
1107 | destroy_workqueue(adapter->workqueue); | |
1108 | ||
1109 | if (adapter->mmioBase) | |
1110 | pci_iounmap(adapter->dev, adapter->mmioBase); | |
1111 | ||
1112 | pci_release_regions(adapter->dev); | |
1113 | ||
1114 | if (adapter->cmd_map) { | |
1115 | pvscsi_free_sgls(adapter); | |
1116 | kfree(adapter->cmd_map); | |
1117 | } | |
1118 | ||
1119 | if (adapter->rings_state) | |
1120 | pci_free_consistent(adapter->dev, PAGE_SIZE, | |
1121 | adapter->rings_state, adapter->ringStatePA); | |
1122 | ||
1123 | if (adapter->req_ring) | |
1124 | pci_free_consistent(adapter->dev, | |
1125 | adapter->req_pages * PAGE_SIZE, | |
1126 | adapter->req_ring, adapter->reqRingPA); | |
1127 | ||
1128 | if (adapter->cmp_ring) | |
1129 | pci_free_consistent(adapter->dev, | |
1130 | adapter->cmp_pages * PAGE_SIZE, | |
1131 | adapter->cmp_ring, adapter->cmpRingPA); | |
1132 | ||
1133 | if (adapter->msg_ring) | |
1134 | pci_free_consistent(adapter->dev, | |
1135 | adapter->msg_pages * PAGE_SIZE, | |
1136 | adapter->msg_ring, adapter->msgRingPA); | |
1137 | } | |
1138 | ||
1139 | /* | |
1140 | * Allocate scatter gather lists. | |
1141 | * | |
1142 | * These are statically allocated. Trying to be clever was not worth it. | |
1143 | * | |
1144 | * Dynamic allocation can fail, and we can't go deep into the memory | |
1145 | * allocator, since we're a SCSI driver, and trying too hard to allocate | |
1146 | * memory might generate disk I/O. We also don't want to fail disk I/O | |
1147 | * in that case because we can't get an allocation - the I/O could be | |
1148 | * trying to swap out data to free memory. Since that is pathological, | |
1149 | * just use a statically allocated scatter list. | |
1150 | * | |
1151 | */ | |
1152 | static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter) | |
1153 | { | |
1154 | struct pvscsi_ctx *ctx; | |
1155 | int i; | |
1156 | ||
1157 | ctx = adapter->cmd_map; | |
1158 | BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); | |
1159 | ||
1160 | for (i = 0; i < adapter->req_depth; ++i, ++ctx) { | |
1161 | ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, | |
1162 | get_order(SGL_SIZE)); | |
1163 | ctx->sglPA = 0; | |
1164 | BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); | |
1165 | if (!ctx->sgl) { | |
1166 | for (; i >= 0; --i, --ctx) { | |
1167 | free_pages((unsigned long)ctx->sgl, | |
1168 | get_order(SGL_SIZE)); | |
1169 | ctx->sgl = NULL; | |
1170 | } | |
1171 | return -ENOMEM; | |
1172 | } | |
1173 | } | |
1174 | ||
1175 | return 0; | |
1176 | } | |
1177 | ||
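/*
 * PCI probe: enable the device, pick a DMA mask, map the MMIO BAR, reset
 * the adapter, allocate the rings, contexts and SG lists, set up
 * interrupts (MSI-X, MSI or INTx) and register the SCSI host.
 */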
1178 | static int __devinit pvscsi_probe(struct pci_dev *pdev, | |
1179 | const struct pci_device_id *id) | |
1180 | { | |
1181 | struct pvscsi_adapter *adapter; | |
1182 | struct Scsi_Host *host; | |
1183 | unsigned int i; | |
1184 | unsigned long flags = 0; | |
1185 | int error; | |
1186 | ||
1187 | error = -ENODEV; | |
1188 | ||
1189 | if (pci_enable_device(pdev)) | |
1190 | return error; | |
1191 | ||
1192 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && | |
1193 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { | |
1194 | printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); | |
1195 | } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && | |
1196 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) { | |
1197 | printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); | |
1198 | } else { | |
1199 | printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); | |
1200 | goto out_disable_device; | |
1201 | } | |
1202 | ||
1203 | pvscsi_template.can_queue = | |
1204 | min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * | |
1205 | PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; | |
1206 | pvscsi_template.cmd_per_lun = | |
1207 | min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); | |
1208 | host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); | |
1209 | if (!host) { | |
1210 | printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); | |
1211 | goto out_disable_device; | |
1212 | } | |
1213 | ||
1214 | adapter = shost_priv(host); | |
1215 | memset(adapter, 0, sizeof(*adapter)); | |
1216 | adapter->dev = pdev; | |
1217 | adapter->host = host; | |
1218 | ||
1219 | spin_lock_init(&adapter->hw_lock); | |
1220 | ||
1221 | host->max_channel = 0; | |
1222 | host->max_id = 16; | |
1223 | host->max_lun = 1; | |
1224 | host->max_cmd_len = 16; | |
1225 | ||
1226 | adapter->rev = pdev->revision; | |
1227 | ||
1228 | if (pci_request_regions(pdev, "vmw_pvscsi")) { | |
1229 | printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); | |
1230 | goto out_free_host; | |
1231 | } | |
1232 | ||
1233 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
1234 | if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) | |
1235 | continue; | |
1236 | ||
1237 | if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) | |
1238 | continue; | |
1239 | ||
1240 | break; | |
1241 | } | |
1242 | ||
1243 | if (i == DEVICE_COUNT_RESOURCE) { | |
1244 | printk(KERN_ERR | |
1245 | "vmw_pvscsi: adapter has no suitable MMIO region\n"); | |
1246 | goto out_release_resources; | |
1247 | } | |
1248 | ||
1249 | adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); | |
1250 | ||
1251 | if (!adapter->mmioBase) { | |
1252 | printk(KERN_ERR | |
1253 | "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", | |
1254 | i, PVSCSI_MEM_SPACE_SIZE); | |
1255 | goto out_release_resources; | |
1256 | } | |
1257 | ||
1258 | pci_set_master(pdev); | |
1259 | pci_set_drvdata(pdev, host); | |
1260 | ||
1261 | ll_adapter_reset(adapter); | |
1262 | ||
1263 | adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); | |
1264 | ||
1265 | error = pvscsi_allocate_rings(adapter); | |
1266 | if (error) { | |
1267 | printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); | |
1268 | goto out_release_resources; | |
1269 | } | |
1270 | ||
1271 | /* | |
1272 | * From this point on we should reset the adapter if anything goes | |
1273 | * wrong. | |
1274 | */ | |
1275 | pvscsi_setup_all_rings(adapter); | |
1276 | ||
1277 | adapter->cmd_map = kcalloc(adapter->req_depth, | |
1278 | sizeof(struct pvscsi_ctx), GFP_KERNEL); | |
1279 | if (!adapter->cmd_map) { | |
1280 | printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); | |
1281 | error = -ENOMEM; | |
1282 | goto out_reset_adapter; | |
1283 | } | |
1284 | ||
1285 | INIT_LIST_HEAD(&adapter->cmd_pool); | |
1286 | for (i = 0; i < adapter->req_depth; i++) { | |
1287 | struct pvscsi_ctx *ctx = adapter->cmd_map + i; | |
1288 | list_add(&ctx->list, &adapter->cmd_pool); | |
1289 | } | |
1290 | ||
1291 | error = pvscsi_allocate_sg(adapter); | |
1292 | if (error) { | |
1293 | printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); | |
1294 | goto out_reset_adapter; | |
1295 | } | |
1296 | ||
1297 | if (!pvscsi_disable_msix && | |
1298 | pvscsi_setup_msix(adapter, &adapter->irq) == 0) { | |
1299 | printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); | |
1300 | adapter->use_msix = 1; | |
1301 | } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { | |
1302 | printk(KERN_INFO "vmw_pvscsi: using MSI\n"); | |
1303 | adapter->use_msi = 1; | |
1304 | adapter->irq = pdev->irq; | |
1305 | } else { | |
1306 | printk(KERN_INFO "vmw_pvscsi: using INTx\n"); | |
1307 | adapter->irq = pdev->irq; | |
1308 | flags = IRQF_SHARED; | |
1309 | } | |
1310 | ||
1311 | error = request_irq(adapter->irq, pvscsi_isr, flags, | |
1312 | "vmw_pvscsi", adapter); | |
1313 | if (error) { | |
1314 | printk(KERN_ERR | |
1315 | "vmw_pvscsi: unable to request IRQ: %d\n", error); | |
1316 | adapter->irq = 0; | |
1317 | goto out_reset_adapter; | |
1318 | } | |
1319 | ||
1320 | error = scsi_add_host(host, &pdev->dev); | |
1321 | if (error) { | |
1322 | printk(KERN_ERR | |
1323 | "vmw_pvscsi: scsi_add_host failed: %d\n", error); | |
1324 | goto out_reset_adapter; | |
1325 | } | |
1326 | ||
1327 | dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", | |
1328 | adapter->rev, host->host_no); | |
1329 | ||
1330 | pvscsi_unmask_intr(adapter); | |
1331 | ||
1332 | scsi_scan_host(host); | |
1333 | ||
1334 | return 0; | |
1335 | ||
1336 | out_reset_adapter: | |
1337 | ll_adapter_reset(adapter); | |
1338 | out_release_resources: | |
1339 | pvscsi_release_resources(adapter); | |
1340 | out_free_host: | |
1341 | scsi_host_put(host); | |
1342 | out_disable_device: | |
1343 | pci_set_drvdata(pdev, NULL); | |
1344 | pci_disable_device(pdev); | |
1345 | ||
1346 | return error; | |
1347 | } | |
1348 | ||
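/*
 * Quiesce the adapter: mask interrupts, flush deferred msg-ring work,
 * tear down the interrupt setup, drain both rings and reset the device.
 */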
1349 | static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) | |
1350 | { | |
1351 | pvscsi_mask_intr(adapter); | |
1352 | ||
1353 | if (adapter->workqueue) | |
1354 | flush_workqueue(adapter->workqueue); | |
1355 | ||
1356 | pvscsi_shutdown_intr(adapter); | |
1357 | ||
1358 | pvscsi_process_request_ring(adapter); | |
1359 | pvscsi_process_completion_ring(adapter); | |
1360 | ll_adapter_reset(adapter); | |
1361 | } | |
1362 | ||
1363 | static void pvscsi_shutdown(struct pci_dev *dev) | |
1364 | { | |
1365 | struct Scsi_Host *host = pci_get_drvdata(dev); | |
1366 | struct pvscsi_adapter *adapter = shost_priv(host); | |
1367 | ||
1368 | __pvscsi_shutdown(adapter); | |
1369 | } | |
1370 | ||
1371 | static void pvscsi_remove(struct pci_dev *pdev) | |
1372 | { | |
1373 | struct Scsi_Host *host = pci_get_drvdata(pdev); | |
1374 | struct pvscsi_adapter *adapter = shost_priv(host); | |
1375 | ||
1376 | scsi_remove_host(host); | |
1377 | ||
1378 | __pvscsi_shutdown(adapter); | |
1379 | pvscsi_release_resources(adapter); | |
1380 | ||
1381 | scsi_host_put(host); | |
1382 | ||
1383 | pci_set_drvdata(pdev, NULL); | |
1384 | pci_disable_device(pdev); | |
1385 | } | |
1386 | ||
1387 | static struct pci_driver pvscsi_pci_driver = { | |
1388 | .name = "vmw_pvscsi", | |
1389 | .id_table = pvscsi_pci_tbl, | |
1390 | .probe = pvscsi_probe, | |
1391 | .remove = __devexit_p(pvscsi_remove), | |
1392 | .shutdown = pvscsi_shutdown, | |
1393 | }; | |
1394 | ||
1395 | static int __init pvscsi_init(void) | |
1396 | { | |
1397 | pr_info("%s - version %s\n", | |
1398 | PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); | |
1399 | return pci_register_driver(&pvscsi_pci_driver); | |
1400 | } | |
1401 | ||
1402 | static void __exit pvscsi_exit(void) | |
1403 | { | |
1404 | pci_unregister_driver(&pvscsi_pci_driver); | |
1405 | } | |
1406 | ||
1407 | module_init(pvscsi_init); | |
1408 | module_exit(pvscsi_exit); |