Commit | Line | Data |
---|---|---|
e67f86b3 AB |
1 | /* Copyright 2012 STEC, Inc. |
2 | * | |
3 | * This file is licensed under the terms of the 3-clause | |
4 | * BSD License (http://opensource.org/licenses/BSD-3-Clause) | |
5 | * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), | |
6 | * at your option. Both licenses are also available in the LICENSE file | |
7 | * distributed with this project. This file may not be copied, modified, | |
8 | * or distributed except in accordance with those terms. | |
9 | * Gordoni Waidhofer <gwaidhofer@stec-inc.com> | |
10 | * Initial Driver Design! | |
11 | * Thomas Swann <tswann@stec-inc.com> | |
12 | * Interrupt handling. | |
13 | * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com> | |
14 | * biomode implementation. | |
15 | * Akhil Bhansali <abhansali@stec-inc.com> | |
16 | * Added support for DISCARD / FLUSH and FUA. | |
17 | */ | |
18 | ||
19 | #include <linux/kernel.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/pci.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/blkdev.h> | |
26 | #include <linux/sched.h> | |
27 | #include <linux/interrupt.h> | |
28 | #include <linux/compiler.h> | |
29 | #include <linux/workqueue.h> | |
30 | #include <linux/bitops.h> | |
31 | #include <linux/delay.h> | |
32 | #include <linux/time.h> | |
33 | #include <linux/hdreg.h> | |
34 | #include <linux/dma-mapping.h> | |
35 | #include <linux/completion.h> | |
36 | #include <linux/scatterlist.h> | |
37 | #include <linux/version.h> | |
38 | #include <linux/err.h> | |
39 | #include <linux/scatterlist.h> | |
40 | #include <linux/aer.h> | |
41 | #include <linux/ctype.h> | |
42 | #include <linux/wait.h> | |
43 | #include <linux/uio.h> | |
44 | #include <scsi/scsi.h> | |
e67f86b3 AB |
45 | #include <scsi/sg.h> |
46 | #include <linux/io.h> | |
47 | #include <linux/uaccess.h> | |
4ca90b53 | 48 | #include <asm/unaligned.h> |
e67f86b3 AB |
49 | |
50 | #include "skd_s1120.h" | |
51 | ||
52 | static int skd_dbg_level; | |
53 | static int skd_isr_comp_limit = 4; | |
54 | ||
55 | enum { | |
56 | STEC_LINK_2_5GTS = 0, | |
57 | STEC_LINK_5GTS = 1, | |
58 | STEC_LINK_8GTS = 2, | |
59 | STEC_LINK_UNKNOWN = 0xFF | |
60 | }; | |
61 | ||
62 | enum { | |
63 | SKD_FLUSH_INITIALIZER, | |
64 | SKD_FLUSH_ZERO_SIZE_FIRST, | |
65 | SKD_FLUSH_DATA_SECOND, | |
66 | }; | |
67 | ||
e67f86b3 AB |
68 | #define SKD_ASSERT(expr) \ |
69 | do { \ | |
70 | if (unlikely(!(expr))) { \ | |
71 | pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ | |
72 | # expr, __FILE__, __func__, __LINE__); \ | |
73 | } \ | |
74 | } while (0) | |
75 | ||
e67f86b3 AB |
76 | #define DRV_NAME "skd" |
77 | #define DRV_VERSION "2.2.1" | |
78 | #define DRV_BUILD_ID "0260" | |
79 | #define PFX DRV_NAME ": " | |
80 | #define DRV_BIN_VERSION 0x100 | |
81 | #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID | |
82 | ||
83 | MODULE_AUTHOR("bug-reports: support@stec-inc.com"); | |
84 | MODULE_LICENSE("Dual BSD/GPL"); | |
85 | ||
38d4a1bb | 86 | MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); |
e67f86b3 AB |
87 | MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); |
88 | ||
89 | #define PCI_VENDOR_ID_STEC 0x1B39 | |
90 | #define PCI_DEVICE_ID_S1120 0x0001 | |
91 | ||
92 | #define SKD_FUA_NV (1 << 1) | |
93 | #define SKD_MINORS_PER_DEVICE 16 | |
94 | ||
95 | #define SKD_MAX_QUEUE_DEPTH 200u | |
96 | ||
97 | #define SKD_PAUSE_TIMEOUT (5 * 1000) | |
98 | ||
99 | #define SKD_N_FITMSG_BYTES (512u) | |
100 | ||
101 | #define SKD_N_SPECIAL_CONTEXT 32u | |
102 | #define SKD_N_SPECIAL_FITMSG_BYTES (128u) | |
103 | ||
104 | /* SG elements are 32 bytes, so we can make this 4096 and still be under the | |
105 | * 128KB limit. That allows 4096*4K = 16M xfer size | |
106 | */ | |
107 | #define SKD_N_SG_PER_REQ_DEFAULT 256u | |
108 | #define SKD_N_SG_PER_SPECIAL 256u | |
109 | ||
110 | #define SKD_N_COMPLETION_ENTRY 256u | |
111 | #define SKD_N_READ_CAP_BYTES (8u) | |
112 | ||
113 | #define SKD_N_INTERNAL_BYTES (512u) | |
114 | ||
115 | /* 5 bits of uniqifier, 0xF800 */ | |
116 | #define SKD_ID_INCR (0x400) | |
117 | #define SKD_ID_TABLE_MASK (3u << 8u) | |
118 | #define SKD_ID_RW_REQUEST (0u << 8u) | |
119 | #define SKD_ID_INTERNAL (1u << 8u) | |
120 | #define SKD_ID_SPECIAL_REQUEST (2u << 8u) | |
121 | #define SKD_ID_FIT_MSG (3u << 8u) | |
122 | #define SKD_ID_SLOT_MASK 0x00FFu | |
123 | #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu | |
124 | ||
125 | #define SKD_N_TIMEOUT_SLOT 4u | |
126 | #define SKD_TIMEOUT_SLOT_MASK 3u | |
127 | ||
128 | #define SKD_N_MAX_SECTORS 2048u | |
129 | ||
130 | #define SKD_MAX_RETRIES 2u | |
131 | ||
132 | #define SKD_TIMER_SECONDS(seconds) (seconds) | |
133 | #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) | |
134 | ||
135 | #define INQ_STD_NBYTES 36 | |
136 | #define SKD_DISCARD_CDB_LENGTH 24 | |
137 | ||
138 | enum skd_drvr_state { | |
139 | SKD_DRVR_STATE_LOAD, | |
140 | SKD_DRVR_STATE_IDLE, | |
141 | SKD_DRVR_STATE_BUSY, | |
142 | SKD_DRVR_STATE_STARTING, | |
143 | SKD_DRVR_STATE_ONLINE, | |
144 | SKD_DRVR_STATE_PAUSING, | |
145 | SKD_DRVR_STATE_PAUSED, | |
146 | SKD_DRVR_STATE_DRAINING_TIMEOUT, | |
147 | SKD_DRVR_STATE_RESTARTING, | |
148 | SKD_DRVR_STATE_RESUMING, | |
149 | SKD_DRVR_STATE_STOPPING, | |
150 | SKD_DRVR_STATE_FAULT, | |
151 | SKD_DRVR_STATE_DISAPPEARED, | |
152 | SKD_DRVR_STATE_PROTOCOL_MISMATCH, | |
153 | SKD_DRVR_STATE_BUSY_ERASE, | |
154 | SKD_DRVR_STATE_BUSY_SANITIZE, | |
155 | SKD_DRVR_STATE_BUSY_IMMINENT, | |
156 | SKD_DRVR_STATE_WAIT_BOOT, | |
157 | SKD_DRVR_STATE_SYNCING, | |
158 | }; | |
159 | ||
160 | #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) | |
161 | #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) | |
162 | #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) | |
163 | #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) | |
164 | #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) | |
165 | #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) | |
166 | #define SKD_START_WAIT_SECONDS 90u | |
167 | ||
168 | enum skd_req_state { | |
169 | SKD_REQ_STATE_IDLE, | |
170 | SKD_REQ_STATE_SETUP, | |
171 | SKD_REQ_STATE_BUSY, | |
172 | SKD_REQ_STATE_COMPLETED, | |
173 | SKD_REQ_STATE_TIMEOUT, | |
174 | SKD_REQ_STATE_ABORTED, | |
175 | }; | |
176 | ||
177 | enum skd_fit_msg_state { | |
178 | SKD_MSG_STATE_IDLE, | |
179 | SKD_MSG_STATE_BUSY, | |
180 | }; | |
181 | ||
182 | enum skd_check_status_action { | |
183 | SKD_CHECK_STATUS_REPORT_GOOD, | |
184 | SKD_CHECK_STATUS_REPORT_SMART_ALERT, | |
185 | SKD_CHECK_STATUS_REQUEUE_REQUEST, | |
186 | SKD_CHECK_STATUS_REPORT_ERROR, | |
187 | SKD_CHECK_STATUS_BUSY_IMMINENT, | |
188 | }; | |
189 | ||
190 | struct skd_fitmsg_context { | |
191 | enum skd_fit_msg_state state; | |
192 | ||
193 | struct skd_fitmsg_context *next; | |
194 | ||
195 | u32 id; | |
196 | u16 outstanding; | |
197 | ||
198 | u32 length; | |
199 | u32 offset; | |
200 | ||
201 | u8 *msg_buf; | |
202 | dma_addr_t mb_dma_address; | |
203 | }; | |
204 | ||
205 | struct skd_request_context { | |
206 | enum skd_req_state state; | |
207 | ||
208 | struct skd_request_context *next; | |
209 | ||
210 | u16 id; | |
211 | u32 fitmsg_id; | |
212 | ||
213 | struct request *req; | |
e67f86b3 AB |
214 | u8 flush_cmd; |
215 | u8 discard_page; | |
216 | ||
217 | u32 timeout_stamp; | |
218 | u8 sg_data_dir; | |
219 | struct scatterlist *sg; | |
220 | u32 n_sg; | |
221 | u32 sg_byte_count; | |
222 | ||
223 | struct fit_sg_descriptor *sksg_list; | |
224 | dma_addr_t sksg_dma_address; | |
225 | ||
226 | struct fit_completion_entry_v1 completion; | |
227 | ||
228 | struct fit_comp_error_info err_info; | |
229 | ||
230 | }; | |
231 | #define SKD_DATA_DIR_HOST_TO_CARD 1 | |
232 | #define SKD_DATA_DIR_CARD_TO_HOST 2 | |
233 | #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */ | |
234 | ||
235 | struct skd_special_context { | |
236 | struct skd_request_context req; | |
237 | ||
238 | u8 orphaned; | |
239 | ||
240 | void *data_buf; | |
241 | dma_addr_t db_dma_address; | |
242 | ||
243 | u8 *msg_buf; | |
244 | dma_addr_t mb_dma_address; | |
245 | }; | |
246 | ||
247 | struct skd_sg_io { | |
248 | fmode_t mode; | |
249 | void __user *argp; | |
250 | ||
251 | struct sg_io_hdr sg; | |
252 | ||
253 | u8 cdb[16]; | |
254 | ||
255 | u32 dxfer_len; | |
256 | u32 iovcnt; | |
257 | struct sg_iovec *iov; | |
258 | struct sg_iovec no_iov_iov; | |
259 | ||
260 | struct skd_special_context *skspcl; | |
261 | }; | |
262 | ||
263 | typedef enum skd_irq_type { | |
264 | SKD_IRQ_LEGACY, | |
265 | SKD_IRQ_MSI, | |
266 | SKD_IRQ_MSIX | |
267 | } skd_irq_type_t; | |
268 | ||
269 | #define SKD_MAX_BARS 2 | |
270 | ||
271 | struct skd_device { | |
272 | volatile void __iomem *mem_map[SKD_MAX_BARS]; | |
273 | resource_size_t mem_phys[SKD_MAX_BARS]; | |
274 | u32 mem_size[SKD_MAX_BARS]; | |
275 | ||
276 | skd_irq_type_t irq_type; | |
277 | u32 msix_count; | |
278 | struct skd_msix_entry *msix_entries; | |
279 | ||
280 | struct pci_dev *pdev; | |
281 | int pcie_error_reporting_is_enabled; | |
282 | ||
283 | spinlock_t lock; | |
284 | struct gendisk *disk; | |
285 | struct request_queue *queue; | |
286 | struct device *class_dev; | |
287 | int gendisk_on; | |
288 | int sync_done; | |
289 | ||
290 | atomic_t device_count; | |
291 | u32 devno; | |
292 | u32 major; | |
293 | char name[32]; | |
294 | char isr_name[30]; | |
295 | ||
296 | enum skd_drvr_state state; | |
297 | u32 drive_state; | |
298 | ||
299 | u32 in_flight; | |
300 | u32 cur_max_queue_depth; | |
301 | u32 queue_low_water_mark; | |
302 | u32 dev_max_queue_depth; | |
303 | ||
304 | u32 num_fitmsg_context; | |
305 | u32 num_req_context; | |
306 | ||
307 | u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; | |
308 | u32 timeout_stamp; | |
309 | struct skd_fitmsg_context *skmsg_free_list; | |
310 | struct skd_fitmsg_context *skmsg_table; | |
311 | ||
312 | struct skd_request_context *skreq_free_list; | |
313 | struct skd_request_context *skreq_table; | |
314 | ||
315 | struct skd_special_context *skspcl_free_list; | |
316 | struct skd_special_context *skspcl_table; | |
317 | ||
318 | struct skd_special_context internal_skspcl; | |
319 | u32 read_cap_blocksize; | |
320 | u32 read_cap_last_lba; | |
321 | int read_cap_is_valid; | |
322 | int inquiry_is_valid; | |
323 | u8 inq_serial_num[13]; /*12 chars plus null term */ | |
324 | u8 id_str[80]; /* holds a composite name (pci + sernum) */ | |
325 | ||
326 | u8 skcomp_cycle; | |
327 | u32 skcomp_ix; | |
328 | struct fit_completion_entry_v1 *skcomp_table; | |
329 | struct fit_comp_error_info *skerr_table; | |
330 | dma_addr_t cq_dma_address; | |
331 | ||
332 | wait_queue_head_t waitq; | |
333 | ||
334 | struct timer_list timer; | |
335 | u32 timer_countdown; | |
336 | u32 timer_substate; | |
337 | ||
338 | int n_special; | |
339 | int sgs_per_request; | |
340 | u32 last_mtd; | |
341 | ||
342 | u32 proto_ver; | |
343 | ||
344 | int dbg_level; | |
345 | u32 connect_time_stamp; | |
346 | int connect_retries; | |
347 | #define SKD_MAX_CONNECT_RETRIES 16 | |
348 | u32 drive_jiffies; | |
349 | ||
350 | u32 timo_slot; | |
351 | ||
e67f86b3 | 352 | |
38d4a1bb | 353 | struct work_struct completion_worker; |
e67f86b3 AB |
354 | }; |
355 | ||
356 | #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) | |
357 | #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) | |
358 | #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) | |
359 | ||
360 | static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) | |
361 | { | |
362 | u32 val; | |
363 | ||
364 | if (likely(skdev->dbg_level < 2)) | |
365 | return readl(skdev->mem_map[1] + offset); | |
366 | else { | |
367 | barrier(); | |
368 | val = readl(skdev->mem_map[1] + offset); | |
369 | barrier(); | |
2e44b427 | 370 | pr_debug("%s:%s:%d offset %x = %x\n", |
371 | skdev->name, __func__, __LINE__, offset, val); | |
e67f86b3 AB |
372 | return val; |
373 | } | |
374 | ||
375 | } | |
376 | ||
377 | static inline void skd_reg_write32(struct skd_device *skdev, u32 val, | |
378 | u32 offset) | |
379 | { | |
380 | if (likely(skdev->dbg_level < 2)) { | |
381 | writel(val, skdev->mem_map[1] + offset); | |
382 | barrier(); | |
e67f86b3 AB |
383 | } else { |
384 | barrier(); | |
385 | writel(val, skdev->mem_map[1] + offset); | |
386 | barrier(); | |
2e44b427 | 387 | pr_debug("%s:%s:%d offset %x = %x\n", |
388 | skdev->name, __func__, __LINE__, offset, val); | |
e67f86b3 AB |
389 | } |
390 | } | |
391 | ||
392 | static inline void skd_reg_write64(struct skd_device *skdev, u64 val, | |
393 | u32 offset) | |
394 | { | |
395 | if (likely(skdev->dbg_level < 2)) { | |
396 | writeq(val, skdev->mem_map[1] + offset); | |
397 | barrier(); | |
e67f86b3 AB |
398 | } else { |
399 | barrier(); | |
400 | writeq(val, skdev->mem_map[1] + offset); | |
401 | barrier(); | |
2e44b427 | 402 | pr_debug("%s:%s:%d offset %x = %016llx\n", |
403 | skdev->name, __func__, __LINE__, offset, val); | |
e67f86b3 AB |
404 | } |
405 | } | |
406 | ||
407 | ||
408 | #define SKD_IRQ_DEFAULT SKD_IRQ_MSI | |
409 | static int skd_isr_type = SKD_IRQ_DEFAULT; | |
410 | ||
411 | module_param(skd_isr_type, int, 0444); | |
412 | MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." | |
413 | " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); | |
414 | ||
415 | #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 | |
416 | static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | |
417 | ||
418 | module_param(skd_max_req_per_msg, int, 0444); | |
419 | MODULE_PARM_DESC(skd_max_req_per_msg, | |
420 | "Maximum SCSI requests packed in a single message." | |
421 | " (1-14, default==1)"); | |
422 | ||
423 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 | |
424 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" | |
425 | static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | |
426 | ||
427 | module_param(skd_max_queue_depth, int, 0444); | |
428 | MODULE_PARM_DESC(skd_max_queue_depth, | |
429 | "Maximum SCSI requests issued to s1120." | |
430 | " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); | |
431 | ||
432 | static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | |
433 | module_param(skd_sgs_per_request, int, 0444); | |
434 | MODULE_PARM_DESC(skd_sgs_per_request, | |
435 | "Maximum SG elements per block request." | |
436 | " (1-4096, default==256)"); | |
437 | ||
438 | static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | |
439 | module_param(skd_max_pass_thru, int, 0444); | |
440 | MODULE_PARM_DESC(skd_max_pass_thru, | |
441 | "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); | |
442 | ||
443 | module_param(skd_dbg_level, int, 0444); | |
444 | MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); | |
445 | ||
446 | module_param(skd_isr_comp_limit, int, 0444); | |
447 | MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); | |
448 | ||
e67f86b3 AB |
449 | /* Major device number dynamically assigned. */ |
450 | static u32 skd_major; | |
451 | ||
e67f86b3 AB |
452 | static void skd_destruct(struct skd_device *skdev); |
453 | static const struct block_device_operations skd_blockdev_ops; | |
454 | static void skd_send_fitmsg(struct skd_device *skdev, | |
455 | struct skd_fitmsg_context *skmsg); | |
456 | static void skd_send_special_fitmsg(struct skd_device *skdev, | |
457 | struct skd_special_context *skspcl); | |
458 | static void skd_request_fn(struct request_queue *rq); | |
459 | static void skd_end_request(struct skd_device *skdev, | |
460 | struct skd_request_context *skreq, int error); | |
461 | static int skd_preop_sg_list(struct skd_device *skdev, | |
462 | struct skd_request_context *skreq); | |
463 | static void skd_postop_sg_list(struct skd_device *skdev, | |
464 | struct skd_request_context *skreq); | |
465 | ||
466 | static void skd_restart_device(struct skd_device *skdev); | |
467 | static int skd_quiesce_dev(struct skd_device *skdev); | |
468 | static int skd_unquiesce_dev(struct skd_device *skdev); | |
469 | static void skd_release_special(struct skd_device *skdev, | |
470 | struct skd_special_context *skspcl); | |
471 | static void skd_disable_interrupts(struct skd_device *skdev); | |
472 | static void skd_isr_fwstate(struct skd_device *skdev); | |
473 | static void skd_recover_requests(struct skd_device *skdev, int requeue); | |
474 | static void skd_soft_reset(struct skd_device *skdev); | |
475 | ||
476 | static const char *skd_name(struct skd_device *skdev); | |
477 | const char *skd_drive_state_to_str(int state); | |
478 | const char *skd_skdev_state_to_str(enum skd_drvr_state state); | |
479 | static void skd_log_skdev(struct skd_device *skdev, const char *event); | |
480 | static void skd_log_skmsg(struct skd_device *skdev, | |
481 | struct skd_fitmsg_context *skmsg, const char *event); | |
482 | static void skd_log_skreq(struct skd_device *skdev, | |
483 | struct skd_request_context *skreq, const char *event); | |
484 | ||
e67f86b3 AB |
485 | /* |
486 | ***************************************************************************** | |
487 | * READ/WRITE REQUESTS | |
488 | ***************************************************************************** | |
489 | */ | |
fcd37eb3 | 490 | static void skd_fail_all_pending(struct skd_device *skdev) |
e67f86b3 AB |
491 | { |
492 | struct request_queue *q = skdev->queue; | |
493 | struct request *req; | |
494 | ||
495 | for (;; ) { | |
496 | req = blk_peek_request(q); | |
497 | if (req == NULL) | |
498 | break; | |
499 | blk_start_request(req); | |
500 | __blk_end_request_all(req, -EIO); | |
501 | } | |
502 | } | |
503 | ||
e67f86b3 AB |
504 | static void |
505 | skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, | |
506 | int data_dir, unsigned lba, | |
507 | unsigned count) | |
508 | { | |
509 | if (data_dir == READ) | |
510 | scsi_req->cdb[0] = 0x28; | |
511 | else | |
512 | scsi_req->cdb[0] = 0x2a; | |
513 | ||
514 | scsi_req->cdb[1] = 0; | |
515 | scsi_req->cdb[2] = (lba & 0xff000000) >> 24; | |
516 | scsi_req->cdb[3] = (lba & 0xff0000) >> 16; | |
517 | scsi_req->cdb[4] = (lba & 0xff00) >> 8; | |
518 | scsi_req->cdb[5] = (lba & 0xff); | |
519 | scsi_req->cdb[6] = 0; | |
520 | scsi_req->cdb[7] = (count & 0xff00) >> 8; | |
521 | scsi_req->cdb[8] = count & 0xff; | |
522 | scsi_req->cdb[9] = 0; | |
523 | } | |
524 | ||
525 | static void | |
526 | skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, | |
38d4a1bb | 527 | struct skd_request_context *skreq) |
e67f86b3 AB |
528 | { |
529 | skreq->flush_cmd = 1; | |
530 | ||
531 | scsi_req->cdb[0] = 0x35; | |
532 | scsi_req->cdb[1] = 0; | |
533 | scsi_req->cdb[2] = 0; | |
534 | scsi_req->cdb[3] = 0; | |
535 | scsi_req->cdb[4] = 0; | |
536 | scsi_req->cdb[5] = 0; | |
537 | scsi_req->cdb[6] = 0; | |
538 | scsi_req->cdb[7] = 0; | |
539 | scsi_req->cdb[8] = 0; | |
540 | scsi_req->cdb[9] = 0; | |
541 | } | |
542 | ||
543 | static void | |
544 | skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, | |
38d4a1bb MS |
545 | struct skd_request_context *skreq, |
546 | struct page *page, | |
547 | u32 lba, u32 count) | |
e67f86b3 AB |
548 | { |
549 | char *buf; | |
550 | unsigned long len; | |
551 | struct request *req; | |
552 | ||
553 | buf = page_address(page); | |
554 | len = SKD_DISCARD_CDB_LENGTH; | |
555 | ||
556 | scsi_req->cdb[0] = UNMAP; | |
557 | scsi_req->cdb[8] = len; | |
558 | ||
559 | put_unaligned_be16(6 + 16, &buf[0]); | |
560 | put_unaligned_be16(16, &buf[2]); | |
561 | put_unaligned_be64(lba, &buf[8]); | |
562 | put_unaligned_be32(count, &buf[16]); | |
563 | ||
fcd37eb3 JA |
564 | req = skreq->req; |
565 | blk_add_request_payload(req, page, len); | |
e67f86b3 AB |
566 | } |
567 | ||
568 | static void skd_request_fn_not_online(struct request_queue *q); | |
569 | ||
570 | static void skd_request_fn(struct request_queue *q) | |
571 | { | |
572 | struct skd_device *skdev = q->queuedata; | |
573 | struct skd_fitmsg_context *skmsg = NULL; | |
574 | struct fit_msg_hdr *fmh = NULL; | |
575 | struct skd_request_context *skreq; | |
576 | struct request *req = NULL; | |
e67f86b3 AB |
577 | struct skd_scsi_request *scsi_req; |
578 | struct page *page; | |
579 | unsigned long io_flags; | |
580 | int error; | |
581 | u32 lba; | |
582 | u32 count; | |
583 | int data_dir; | |
584 | u32 be_lba; | |
585 | u32 be_count; | |
586 | u64 be_dmaa; | |
587 | u64 cmdctxt; | |
588 | u32 timo_slot; | |
589 | void *cmd_ptr; | |
590 | int flush, fua; | |
591 | ||
592 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
593 | skd_request_fn_not_online(q); | |
594 | return; | |
595 | } | |
596 | ||
6a5ec65b | 597 | if (blk_queue_stopped(skdev->queue)) { |
e67f86b3 AB |
598 | if (skdev->skmsg_free_list == NULL || |
599 | skdev->skreq_free_list == NULL || | |
600 | skdev->in_flight >= skdev->queue_low_water_mark) | |
601 | /* There is still some kind of shortage */ | |
602 | return; | |
603 | ||
6a5ec65b | 604 | queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); |
e67f86b3 AB |
605 | } |
606 | ||
607 | /* | |
608 | * Stop conditions: | |
609 | * - There are no more native requests | |
610 | * - There are already the maximum number of requests in progress | |
611 | * - There are no more skd_request_context entries | |
612 | * - There are no more FIT msg buffers | |
613 | */ | |
614 | for (;; ) { | |
615 | ||
616 | flush = fua = 0; | |
617 | ||
fcd37eb3 | 618 | req = blk_peek_request(q); |
e67f86b3 | 619 | |
fcd37eb3 JA |
620 | /* Are there any native requests to start? */ |
621 | if (req == NULL) | |
622 | break; | |
e67f86b3 | 623 | |
fcd37eb3 JA |
624 | lba = (u32)blk_rq_pos(req); |
625 | count = blk_rq_sectors(req); | |
626 | data_dir = rq_data_dir(req); | |
627 | io_flags = req->cmd_flags; | |
e67f86b3 | 628 | |
fcd37eb3 JA |
629 | if (io_flags & REQ_FLUSH) |
630 | flush++; | |
e67f86b3 | 631 | |
fcd37eb3 JA |
632 | if (io_flags & REQ_FUA) |
633 | fua++; | |
e67f86b3 | 634 | |
fcd37eb3 JA |
635 | pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " |
636 | "count=%u(0x%x) dir=%d\n", | |
637 | skdev->name, __func__, __LINE__, | |
638 | req, lba, lba, count, count, data_dir); | |
e67f86b3 | 639 | |
38d4a1bb | 640 | /* At this point we know there is a request */ |
e67f86b3 AB |
641 | |
642 | /* Are too many requets already in progress? */ | |
643 | if (skdev->in_flight >= skdev->cur_max_queue_depth) { | |
2e44b427 | 644 | pr_debug("%s:%s:%d qdepth %d, limit %d\n", |
645 | skdev->name, __func__, __LINE__, | |
646 | skdev->in_flight, skdev->cur_max_queue_depth); | |
e67f86b3 AB |
647 | break; |
648 | } | |
649 | ||
650 | /* Is a skd_request_context available? */ | |
651 | skreq = skdev->skreq_free_list; | |
652 | if (skreq == NULL) { | |
2e44b427 | 653 | pr_debug("%s:%s:%d Out of req=%p\n", |
654 | skdev->name, __func__, __LINE__, q); | |
e67f86b3 AB |
655 | break; |
656 | } | |
657 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); | |
658 | SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); | |
659 | ||
660 | /* Now we check to see if we can get a fit msg */ | |
661 | if (skmsg == NULL) { | |
662 | if (skdev->skmsg_free_list == NULL) { | |
2e44b427 | 663 | pr_debug("%s:%s:%d Out of msg\n", |
664 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
665 | break; |
666 | } | |
667 | } | |
668 | ||
669 | skreq->flush_cmd = 0; | |
670 | skreq->n_sg = 0; | |
671 | skreq->sg_byte_count = 0; | |
672 | skreq->discard_page = 0; | |
673 | ||
674 | /* | |
38d4a1bb | 675 | * OK to now dequeue request from q. |
e67f86b3 AB |
676 | * |
677 | * At this point we are comitted to either start or reject | |
678 | * the native request. Note that skd_request_context is | |
679 | * available but is still at the head of the free list. | |
680 | */ | |
fcd37eb3 JA |
681 | blk_start_request(req); |
682 | skreq->req = req; | |
683 | skreq->fitmsg_id = 0; | |
e67f86b3 AB |
684 | |
685 | /* Either a FIT msg is in progress or we have to start one. */ | |
686 | if (skmsg == NULL) { | |
687 | /* Are there any FIT msg buffers available? */ | |
688 | skmsg = skdev->skmsg_free_list; | |
689 | if (skmsg == NULL) { | |
2e44b427 | 690 | pr_debug("%s:%s:%d Out of msg skdev=%p\n", |
691 | skdev->name, __func__, __LINE__, | |
692 | skdev); | |
e67f86b3 AB |
693 | break; |
694 | } | |
695 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); | |
696 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); | |
697 | ||
698 | skdev->skmsg_free_list = skmsg->next; | |
699 | ||
700 | skmsg->state = SKD_MSG_STATE_BUSY; | |
701 | skmsg->id += SKD_ID_INCR; | |
702 | ||
703 | /* Initialize the FIT msg header */ | |
704 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | |
705 | memset(fmh, 0, sizeof(*fmh)); | |
706 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
707 | skmsg->length = sizeof(*fmh); | |
708 | } | |
709 | ||
710 | skreq->fitmsg_id = skmsg->id; | |
711 | ||
712 | /* | |
713 | * Note that a FIT msg may have just been started | |
714 | * but contains no SoFIT requests yet. | |
715 | */ | |
716 | ||
717 | /* | |
718 | * Transcode the request, checking as we go. The outcome of | |
719 | * the transcoding is represented by the error variable. | |
720 | */ | |
721 | cmd_ptr = &skmsg->msg_buf[skmsg->length]; | |
722 | memset(cmd_ptr, 0, 32); | |
723 | ||
724 | be_lba = cpu_to_be32(lba); | |
725 | be_count = cpu_to_be32(count); | |
726 | be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); | |
727 | cmdctxt = skreq->id + SKD_ID_INCR; | |
728 | ||
729 | scsi_req = cmd_ptr; | |
730 | scsi_req->hdr.tag = cmdctxt; | |
731 | scsi_req->hdr.sg_list_dma_address = be_dmaa; | |
732 | ||
733 | if (data_dir == READ) | |
734 | skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; | |
735 | else | |
736 | skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; | |
737 | ||
738 | if (io_flags & REQ_DISCARD) { | |
739 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | |
740 | if (!page) { | |
741 | pr_err("request_fn:Page allocation failed.\n"); | |
742 | skd_end_request(skdev, skreq, -ENOMEM); | |
743 | break; | |
744 | } | |
745 | skreq->discard_page = 1; | |
dc4a9307 | 746 | req->completion_data = page; |
e67f86b3 AB |
747 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); |
748 | ||
749 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { | |
750 | skd_prep_zerosize_flush_cdb(scsi_req, skreq); | |
751 | SKD_ASSERT(skreq->flush_cmd == 1); | |
752 | ||
753 | } else { | |
754 | skd_prep_rw_cdb(scsi_req, data_dir, lba, count); | |
755 | } | |
756 | ||
757 | if (fua) | |
758 | scsi_req->cdb[1] |= SKD_FUA_NV; | |
759 | ||
fcd37eb3 | 760 | if (!req->bio) |
e67f86b3 AB |
761 | goto skip_sg; |
762 | ||
763 | error = skd_preop_sg_list(skdev, skreq); | |
764 | ||
765 | if (error != 0) { | |
766 | /* | |
767 | * Complete the native request with error. | |
768 | * Note that the request context is still at the | |
769 | * head of the free list, and that the SoFIT request | |
770 | * was encoded into the FIT msg buffer but the FIT | |
771 | * msg length has not been updated. In short, the | |
772 | * only resource that has been allocated but might | |
773 | * not be used is that the FIT msg could be empty. | |
774 | */ | |
2e44b427 | 775 | pr_debug("%s:%s:%d error Out\n", |
776 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
777 | skd_end_request(skdev, skreq, error); |
778 | continue; | |
779 | } | |
780 | ||
781 | skip_sg: | |
782 | scsi_req->hdr.sg_list_len_bytes = | |
783 | cpu_to_be32(skreq->sg_byte_count); | |
784 | ||
785 | /* Complete resource allocations. */ | |
786 | skdev->skreq_free_list = skreq->next; | |
787 | skreq->state = SKD_REQ_STATE_BUSY; | |
788 | skreq->id += SKD_ID_INCR; | |
789 | ||
790 | skmsg->length += sizeof(struct skd_scsi_request); | |
791 | fmh->num_protocol_cmds_coalesced++; | |
792 | ||
793 | /* | |
794 | * Update the active request counts. | |
795 | * Capture the timeout timestamp. | |
796 | */ | |
797 | skreq->timeout_stamp = skdev->timeout_stamp; | |
798 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
799 | skdev->timeout_slot[timo_slot]++; | |
800 | skdev->in_flight++; | |
2e44b427 | 801 | pr_debug("%s:%s:%d req=0x%x busy=%d\n", |
802 | skdev->name, __func__, __LINE__, | |
803 | skreq->id, skdev->in_flight); | |
e67f86b3 AB |
804 | |
805 | /* | |
806 | * If the FIT msg buffer is full send it. | |
807 | */ | |
808 | if (skmsg->length >= SKD_N_FITMSG_BYTES || | |
809 | fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { | |
810 | skd_send_fitmsg(skdev, skmsg); | |
811 | skmsg = NULL; | |
812 | fmh = NULL; | |
813 | } | |
814 | } | |
815 | ||
816 | /* | |
817 | * Is a FIT msg in progress? If it is empty put the buffer back | |
818 | * on the free list. If it is non-empty send what we got. | |
819 | * This minimizes latency when there are fewer requests than | |
820 | * what fits in a FIT msg. | |
821 | */ | |
822 | if (skmsg != NULL) { | |
823 | /* Bigger than just a FIT msg header? */ | |
824 | if (skmsg->length > sizeof(struct fit_msg_hdr)) { | |
2e44b427 | 825 | pr_debug("%s:%s:%d sending msg=%p, len %d\n", |
826 | skdev->name, __func__, __LINE__, | |
827 | skmsg, skmsg->length); | |
e67f86b3 AB |
828 | skd_send_fitmsg(skdev, skmsg); |
829 | } else { | |
830 | /* | |
831 | * The FIT msg is empty. It means we got started | |
832 | * on the msg, but the requests were rejected. | |
833 | */ | |
834 | skmsg->state = SKD_MSG_STATE_IDLE; | |
835 | skmsg->id += SKD_ID_INCR; | |
836 | skmsg->next = skdev->skmsg_free_list; | |
837 | skdev->skmsg_free_list = skmsg; | |
838 | } | |
839 | skmsg = NULL; | |
840 | fmh = NULL; | |
841 | } | |
842 | ||
843 | /* | |
844 | * If req is non-NULL it means there is something to do but | |
845 | * we are out of a resource. | |
846 | */ | |
fcd37eb3 | 847 | if (req) |
6a5ec65b | 848 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
849 | } |
850 | ||
38d4a1bb MS |
851 | static void skd_end_request(struct skd_device *skdev, |
852 | struct skd_request_context *skreq, int error) | |
e67f86b3 AB |
853 | { |
854 | struct request *req = skreq->req; | |
855 | unsigned int io_flags = req->cmd_flags; | |
856 | ||
857 | if ((io_flags & REQ_DISCARD) && | |
858 | (skreq->discard_page == 1)) { | |
38d4a1bb | 859 | pr_debug("%s:%s:%d, free the page!", |
2e44b427 | 860 | skdev->name, __func__, __LINE__); |
dc4a9307 | 861 | __free_page(req->completion_data); |
e67f86b3 AB |
862 | } |
863 | ||
864 | if (unlikely(error)) { | |
865 | struct request *req = skreq->req; | |
866 | char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; | |
867 | u32 lba = (u32)blk_rq_pos(req); | |
868 | u32 count = blk_rq_sectors(req); | |
869 | ||
870 | pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", | |
871 | skd_name(skdev), cmd, lba, count, skreq->id); | |
872 | } else | |
2e44b427 | 873 | pr_debug("%s:%s:%d id=0x%x error=%d\n", |
874 | skdev->name, __func__, __LINE__, skreq->id, error); | |
e67f86b3 AB |
875 | |
876 | __blk_end_request_all(skreq->req, error); | |
877 | } | |
878 | ||
fcd37eb3 | 879 | static int skd_preop_sg_list(struct skd_device *skdev, |
38d4a1bb | 880 | struct skd_request_context *skreq) |
e67f86b3 AB |
881 | { |
882 | struct request *req = skreq->req; | |
883 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
884 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
885 | struct scatterlist *sg = &skreq->sg[0]; | |
886 | int n_sg; | |
887 | int i; | |
888 | ||
889 | skreq->sg_byte_count = 0; | |
890 | ||
891 | /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || | |
892 | skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ | |
893 | ||
894 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); | |
895 | if (n_sg <= 0) | |
896 | return -EINVAL; | |
897 | ||
898 | /* | |
899 | * Map scatterlist to PCI bus addresses. | |
900 | * Note PCI might change the number of entries. | |
901 | */ | |
902 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); | |
903 | if (n_sg <= 0) | |
904 | return -EINVAL; | |
905 | ||
906 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); | |
907 | ||
908 | skreq->n_sg = n_sg; | |
909 | ||
910 | for (i = 0; i < n_sg; i++) { | |
911 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
912 | u32 cnt = sg_dma_len(&sg[i]); | |
913 | uint64_t dma_addr = sg_dma_address(&sg[i]); | |
914 | ||
915 | sgd->control = FIT_SGD_CONTROL_NOT_LAST; | |
916 | sgd->byte_count = cnt; | |
917 | skreq->sg_byte_count += cnt; | |
918 | sgd->host_side_addr = dma_addr; | |
919 | sgd->dev_side_addr = 0; | |
920 | } | |
921 | ||
922 | skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; | |
923 | skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; | |
924 | ||
925 | if (unlikely(skdev->dbg_level > 1)) { | |
2e44b427 | 926 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", |
927 | skdev->name, __func__, __LINE__, | |
928 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | |
e67f86b3 AB |
929 | for (i = 0; i < n_sg; i++) { |
930 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
2e44b427 | 931 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " |
932 | "addr=0x%llx next=0x%llx\n", | |
933 | skdev->name, __func__, __LINE__, | |
934 | i, sgd->byte_count, sgd->control, | |
935 | sgd->host_side_addr, sgd->next_desc_ptr); | |
e67f86b3 AB |
936 | } |
937 | } | |
938 | ||
939 | return 0; | |
940 | } | |
941 | ||
fcd37eb3 | 942 | static void skd_postop_sg_list(struct skd_device *skdev, |
38d4a1bb | 943 | struct skd_request_context *skreq) |
e67f86b3 AB |
944 | { |
945 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
946 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
947 | ||
948 | /* | |
949 | * restore the next ptr for next IO request so we | |
950 | * don't have to set it every time. | |
951 | */ | |
952 | skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = | |
953 | skreq->sksg_dma_address + | |
954 | ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); | |
955 | pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); | |
956 | } | |
957 | ||
e67f86b3 AB |
958 | static void skd_request_fn_not_online(struct request_queue *q) |
959 | { | |
960 | struct skd_device *skdev = q->queuedata; | |
961 | int error; | |
962 | ||
963 | SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); | |
964 | ||
965 | skd_log_skdev(skdev, "req_not_online"); | |
966 | switch (skdev->state) { | |
967 | case SKD_DRVR_STATE_PAUSING: | |
968 | case SKD_DRVR_STATE_PAUSED: | |
969 | case SKD_DRVR_STATE_STARTING: | |
970 | case SKD_DRVR_STATE_RESTARTING: | |
971 | case SKD_DRVR_STATE_WAIT_BOOT: | |
972 | /* In case of starting, we haven't started the queue, | |
973 | * so we can't get here... but requests are | |
974 | * possibly hanging out waiting for us because we | |
975 | * reported the dev/skd0 already. They'll wait | |
976 | * forever if connect doesn't complete. | |
977 | * What to do??? delay dev/skd0 ?? | |
978 | */ | |
979 | case SKD_DRVR_STATE_BUSY: | |
980 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
981 | case SKD_DRVR_STATE_BUSY_ERASE: | |
982 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
983 | return; | |
984 | ||
985 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
986 | case SKD_DRVR_STATE_STOPPING: | |
987 | case SKD_DRVR_STATE_SYNCING: | |
988 | case SKD_DRVR_STATE_FAULT: | |
989 | case SKD_DRVR_STATE_DISAPPEARED: | |
990 | default: | |
991 | error = -EIO; | |
992 | break; | |
993 | } | |
994 | ||
995 | /* If we get here, terminate all pending block requeusts | |
996 | * with EIO and any scsi pass thru with appropriate sense | |
997 | */ | |
998 | ||
999 | skd_fail_all_pending(skdev); | |
1000 | } | |
1001 | ||
1002 | /* | |
1003 | ***************************************************************************** | |
1004 | * TIMER | |
1005 | ***************************************************************************** | |
1006 | */ | |
1007 | ||
1008 | static void skd_timer_tick_not_online(struct skd_device *skdev); | |
1009 | ||
1010 | static void skd_timer_tick(ulong arg) | |
1011 | { | |
1012 | struct skd_device *skdev = (struct skd_device *)arg; | |
1013 | ||
1014 | u32 timo_slot; | |
1015 | u32 overdue_timestamp; | |
1016 | unsigned long reqflags; | |
1017 | u32 state; | |
1018 | ||
1019 | if (skdev->state == SKD_DRVR_STATE_FAULT) | |
1020 | /* The driver has declared fault, and we want it to | |
1021 | * stay that way until driver is reloaded. | |
1022 | */ | |
1023 | return; | |
1024 | ||
1025 | spin_lock_irqsave(&skdev->lock, reqflags); | |
1026 | ||
1027 | state = SKD_READL(skdev, FIT_STATUS); | |
1028 | state &= FIT_SR_DRIVE_STATE_MASK; | |
1029 | if (state != skdev->drive_state) | |
1030 | skd_isr_fwstate(skdev); | |
1031 | ||
1032 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
1033 | skd_timer_tick_not_online(skdev); | |
1034 | goto timer_func_out; | |
1035 | } | |
1036 | skdev->timeout_stamp++; | |
1037 | timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
1038 | ||
1039 | /* | |
1040 | * All requests that happened during the previous use of | |
1041 | * this slot should be done by now. The previous use was | |
1042 | * over 7 seconds ago. | |
1043 | */ | |
1044 | if (skdev->timeout_slot[timo_slot] == 0) | |
1045 | goto timer_func_out; | |
1046 | ||
1047 | /* Something is overdue */ | |
1048 | overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; | |
1049 | ||
2e44b427 | 1050 | pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", |
1051 | skdev->name, __func__, __LINE__, | |
1052 | skdev->timeout_slot[timo_slot], skdev->in_flight); | |
e67f86b3 AB |
1053 | pr_err("(%s): Overdue IOs (%d), busy %d\n", |
1054 | skd_name(skdev), skdev->timeout_slot[timo_slot], | |
1055 | skdev->in_flight); | |
1056 | ||
1057 | skdev->timer_countdown = SKD_DRAINING_TIMO; | |
1058 | skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; | |
1059 | skdev->timo_slot = timo_slot; | |
6a5ec65b | 1060 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
1061 | |
1062 | timer_func_out: | |
1063 | mod_timer(&skdev->timer, (jiffies + HZ)); | |
1064 | ||
1065 | spin_unlock_irqrestore(&skdev->lock, reqflags); | |
1066 | } | |
1067 | ||
1068 | static void skd_timer_tick_not_online(struct skd_device *skdev) | |
1069 | { | |
1070 | switch (skdev->state) { | |
1071 | case SKD_DRVR_STATE_IDLE: | |
1072 | case SKD_DRVR_STATE_LOAD: | |
1073 | break; | |
1074 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
2e44b427 | 1075 | pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", |
1076 | skdev->name, __func__, __LINE__, | |
1077 | skdev->drive_state, skdev->state); | |
e67f86b3 AB |
1078 | /* If we've been in sanitize for 3 seconds, we figure we're not |
1079 | * going to get anymore completions, so recover requests now | |
1080 | */ | |
1081 | if (skdev->timer_countdown > 0) { | |
1082 | skdev->timer_countdown--; | |
1083 | return; | |
1084 | } | |
1085 | skd_recover_requests(skdev, 0); | |
1086 | break; | |
1087 | ||
1088 | case SKD_DRVR_STATE_BUSY: | |
1089 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
1090 | case SKD_DRVR_STATE_BUSY_ERASE: | |
2e44b427 | 1091 | pr_debug("%s:%s:%d busy[%x], countdown=%d\n", |
1092 | skdev->name, __func__, __LINE__, | |
1093 | skdev->state, skdev->timer_countdown); | |
e67f86b3 AB |
1094 | if (skdev->timer_countdown > 0) { |
1095 | skdev->timer_countdown--; | |
1096 | return; | |
1097 | } | |
2e44b427 | 1098 | pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.", |
1099 | skdev->name, __func__, __LINE__, | |
1100 | skdev->state, skdev->timer_countdown); | |
e67f86b3 AB |
1101 | skd_restart_device(skdev); |
1102 | break; | |
1103 | ||
1104 | case SKD_DRVR_STATE_WAIT_BOOT: | |
1105 | case SKD_DRVR_STATE_STARTING: | |
1106 | if (skdev->timer_countdown > 0) { | |
1107 | skdev->timer_countdown--; | |
1108 | return; | |
1109 | } | |
1110 | /* For now, we fault the drive. Could attempt resets to | |
1111 | * revcover at some point. */ | |
1112 | skdev->state = SKD_DRVR_STATE_FAULT; | |
1113 | ||
1114 | pr_err("(%s): DriveFault Connect Timeout (%x)\n", | |
1115 | skd_name(skdev), skdev->drive_state); | |
1116 | ||
1117 | /*start the queue so we can respond with error to requests */ | |
1118 | /* wakeup anyone waiting for startup complete */ | |
6a5ec65b | 1119 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
1120 | skdev->gendisk_on = -1; |
1121 | wake_up_interruptible(&skdev->waitq); | |
1122 | break; | |
1123 | ||
1124 | case SKD_DRVR_STATE_ONLINE: | |
1125 | /* shouldn't get here. */ | |
1126 | break; | |
1127 | ||
1128 | case SKD_DRVR_STATE_PAUSING: | |
1129 | case SKD_DRVR_STATE_PAUSED: | |
1130 | break; | |
1131 | ||
1132 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
2e44b427 | 1133 | pr_debug("%s:%s:%d " |
1134 | "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", | |
1135 | skdev->name, __func__, __LINE__, | |
1136 | skdev->timo_slot, | |
1137 | skdev->timer_countdown, | |
1138 | skdev->in_flight, | |
1139 | skdev->timeout_slot[skdev->timo_slot]); | |
e67f86b3 AB |
1140 | /* if the slot has cleared we can let the I/O continue */ |
1141 | if (skdev->timeout_slot[skdev->timo_slot] == 0) { | |
2e44b427 | 1142 | pr_debug("%s:%s:%d Slot drained, starting queue.\n", |
1143 | skdev->name, __func__, __LINE__); | |
e67f86b3 | 1144 | skdev->state = SKD_DRVR_STATE_ONLINE; |
6a5ec65b | 1145 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
1146 | return; |
1147 | } | |
1148 | if (skdev->timer_countdown > 0) { | |
1149 | skdev->timer_countdown--; | |
1150 | return; | |
1151 | } | |
1152 | skd_restart_device(skdev); | |
1153 | break; | |
1154 | ||
1155 | case SKD_DRVR_STATE_RESTARTING: | |
1156 | if (skdev->timer_countdown > 0) { | |
1157 | skdev->timer_countdown--; | |
1158 | return; | |
1159 | } | |
1160 | /* For now, we fault the drive. Could attempt resets to | |
1161 | * revcover at some point. */ | |
1162 | skdev->state = SKD_DRVR_STATE_FAULT; | |
1163 | pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", | |
1164 | skd_name(skdev), skdev->drive_state); | |
1165 | ||
1166 | /* | |
1167 | * Recovering does two things: | |
1168 | * 1. completes IO with error | |
1169 | * 2. reclaims dma resources | |
1170 | * When is it safe to recover requests? | |
1171 | * - if the drive state is faulted | |
1172 | * - if the state is still soft reset after out timeout | |
1173 | * - if the drive registers are dead (state = FF) | |
1174 | * If it is "unsafe", we still need to recover, so we will | |
1175 | * disable pci bus mastering and disable our interrupts. | |
1176 | */ | |
1177 | ||
1178 | if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || | |
1179 | (skdev->drive_state == FIT_SR_DRIVE_FAULT) || | |
1180 | (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) | |
1181 | /* It never came out of soft reset. Try to | |
1182 | * recover the requests and then let them | |
1183 | * fail. This is to mitigate hung processes. */ | |
1184 | skd_recover_requests(skdev, 0); | |
1185 | else { | |
1186 | pr_err("(%s): Disable BusMaster (%x)\n", | |
1187 | skd_name(skdev), skdev->drive_state); | |
1188 | pci_disable_device(skdev->pdev); | |
1189 | skd_disable_interrupts(skdev); | |
1190 | skd_recover_requests(skdev, 0); | |
1191 | } | |
1192 | ||
1193 | /*start the queue so we can respond with error to requests */ | |
1194 | /* wakeup anyone waiting for startup complete */ | |
6a5ec65b | 1195 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
1196 | skdev->gendisk_on = -1; |
1197 | wake_up_interruptible(&skdev->waitq); | |
1198 | break; | |
1199 | ||
1200 | case SKD_DRVR_STATE_RESUMING: | |
1201 | case SKD_DRVR_STATE_STOPPING: | |
1202 | case SKD_DRVR_STATE_SYNCING: | |
1203 | case SKD_DRVR_STATE_FAULT: | |
1204 | case SKD_DRVR_STATE_DISAPPEARED: | |
1205 | default: | |
1206 | break; | |
1207 | } | |
1208 | } | |
1209 | ||
1210 | static int skd_start_timer(struct skd_device *skdev) | |
1211 | { | |
1212 | int rc; | |
1213 | ||
1214 | init_timer(&skdev->timer); | |
1215 | setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); | |
1216 | ||
1217 | rc = mod_timer(&skdev->timer, (jiffies + HZ)); | |
1218 | if (rc) | |
1219 | pr_err("%s: failed to start timer %d\n", | |
1220 | __func__, rc); | |
1221 | return rc; | |
1222 | } | |
1223 | ||
1224 | static void skd_kill_timer(struct skd_device *skdev) | |
1225 | { | |
1226 | del_timer_sync(&skdev->timer); | |
1227 | } | |
1228 | ||
1229 | /* | |
1230 | ***************************************************************************** | |
1231 | * IOCTL | |
1232 | ***************************************************************************** | |
1233 | */ | |
1234 | static int skd_ioctl_sg_io(struct skd_device *skdev, | |
1235 | fmode_t mode, void __user *argp); | |
1236 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | |
1237 | struct skd_sg_io *sksgio); | |
1238 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | |
1239 | struct skd_sg_io *sksgio); | |
1240 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | |
1241 | struct skd_sg_io *sksgio); | |
1242 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | |
1243 | struct skd_sg_io *sksgio, int dxfer_dir); | |
1244 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | |
1245 | struct skd_sg_io *sksgio); | |
1246 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); | |
1247 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | |
1248 | struct skd_sg_io *sksgio); | |
1249 | static int skd_sg_io_put_status(struct skd_device *skdev, | |
1250 | struct skd_sg_io *sksgio); | |
1251 | ||
1252 | static void skd_complete_special(struct skd_device *skdev, | |
1253 | volatile struct fit_completion_entry_v1 | |
1254 | *skcomp, | |
1255 | volatile struct fit_comp_error_info *skerr, | |
1256 | struct skd_special_context *skspcl); | |
1257 | ||
1258 | static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, | |
1259 | uint cmd_in, ulong arg) | |
1260 | { | |
1261 | int rc = 0; | |
1262 | struct gendisk *disk = bdev->bd_disk; | |
1263 | struct skd_device *skdev = disk->private_data; | |
1264 | void __user *p = (void *)arg; | |
1265 | ||
2e44b427 | 1266 | pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", |
1267 | skdev->name, __func__, __LINE__, | |
1268 | disk->disk_name, current->comm, mode, cmd_in, arg); | |
e67f86b3 AB |
1269 | |
1270 | if (!capable(CAP_SYS_ADMIN)) | |
1271 | return -EPERM; | |
1272 | ||
1273 | switch (cmd_in) { | |
1274 | case SG_SET_TIMEOUT: | |
1275 | case SG_GET_TIMEOUT: | |
1276 | case SG_GET_VERSION_NUM: | |
1277 | rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); | |
1278 | break; | |
1279 | case SG_IO: | |
1280 | rc = skd_ioctl_sg_io(skdev, mode, p); | |
1281 | break; | |
1282 | ||
1283 | default: | |
1284 | rc = -ENOTTY; | |
1285 | break; | |
1286 | } | |
1287 | ||
2e44b427 | 1288 | pr_debug("%s:%s:%d %s: completion rc %d\n", |
1289 | skdev->name, __func__, __LINE__, disk->disk_name, rc); | |
e67f86b3 AB |
1290 | return rc; |
1291 | } | |
1292 | ||
1293 | static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, | |
1294 | void __user *argp) | |
1295 | { | |
1296 | int rc; | |
1297 | struct skd_sg_io sksgio; | |
1298 | ||
1299 | memset(&sksgio, 0, sizeof(sksgio)); | |
1300 | sksgio.mode = mode; | |
1301 | sksgio.argp = argp; | |
1302 | sksgio.iov = &sksgio.no_iov_iov; | |
1303 | ||
1304 | switch (skdev->state) { | |
1305 | case SKD_DRVR_STATE_ONLINE: | |
1306 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
1307 | break; | |
1308 | ||
1309 | default: | |
2e44b427 | 1310 | pr_debug("%s:%s:%d drive not online\n", |
1311 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
1312 | rc = -ENXIO; |
1313 | goto out; | |
1314 | } | |
1315 | ||
f721bb0d AB |
1316 | rc = skd_sg_io_get_and_check_args(skdev, &sksgio); |
1317 | if (rc) | |
1318 | goto out; | |
1319 | ||
1320 | rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); | |
1321 | if (rc) | |
1322 | goto out; | |
1323 | ||
1324 | rc = skd_sg_io_prep_buffering(skdev, &sksgio); | |
1325 | if (rc) | |
1326 | goto out; | |
1327 | ||
1328 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); | |
1329 | if (rc) | |
e67f86b3 AB |
1330 | goto out; |
1331 | ||
f721bb0d AB |
1332 | rc = skd_sg_io_send_fitmsg(skdev, &sksgio); |
1333 | if (rc) | |
e67f86b3 AB |
1334 | goto out; |
1335 | ||
f721bb0d AB |
1336 | rc = skd_sg_io_await(skdev, &sksgio); |
1337 | if (rc) | |
1338 | goto out; | |
1339 | ||
1340 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); | |
1341 | if (rc) | |
1342 | goto out; | |
1343 | ||
1344 | rc = skd_sg_io_put_status(skdev, &sksgio); | |
1345 | if (rc) | |
e67f86b3 AB |
1346 | goto out; |
1347 | ||
1348 | rc = 0; | |
1349 | ||
1350 | out: | |
1351 | skd_sg_io_release_skspcl(skdev, &sksgio); | |
1352 | ||
1353 | if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) | |
1354 | kfree(sksgio.iov); | |
1355 | return rc; | |
1356 | } | |
1357 | ||
1358 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | |
1359 | struct skd_sg_io *sksgio) | |
1360 | { | |
1361 | struct sg_io_hdr *sgp = &sksgio->sg; | |
1362 | int i, acc; | |
1363 | ||
1364 | if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { | |
2e44b427 | 1365 | pr_debug("%s:%s:%d access sg failed %p\n", |
1366 | skdev->name, __func__, __LINE__, sksgio->argp); | |
e67f86b3 AB |
1367 | return -EFAULT; |
1368 | } | |
1369 | ||
1370 | if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { | |
2e44b427 | 1371 | pr_debug("%s:%s:%d copy_from_user sg failed %p\n", |
1372 | skdev->name, __func__, __LINE__, sksgio->argp); | |
e67f86b3 AB |
1373 | return -EFAULT; |
1374 | } | |
1375 | ||
1376 | if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { | |
2e44b427 | 1377 | pr_debug("%s:%s:%d interface_id invalid 0x%x\n", |
1378 | skdev->name, __func__, __LINE__, sgp->interface_id); | |
e67f86b3 AB |
1379 | return -EINVAL; |
1380 | } | |
1381 | ||
1382 | if (sgp->cmd_len > sizeof(sksgio->cdb)) { | |
2e44b427 | 1383 | pr_debug("%s:%s:%d cmd_len invalid %d\n", |
1384 | skdev->name, __func__, __LINE__, sgp->cmd_len); | |
e67f86b3 AB |
1385 | return -EINVAL; |
1386 | } | |
1387 | ||
1388 | if (sgp->iovec_count > 256) { | |
2e44b427 | 1389 | pr_debug("%s:%s:%d iovec_count invalid %d\n", |
1390 | skdev->name, __func__, __LINE__, sgp->iovec_count); | |
e67f86b3 AB |
1391 | return -EINVAL; |
1392 | } | |
1393 | ||
1394 | if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { | |
2e44b427 | 1395 | pr_debug("%s:%s:%d dxfer_len invalid %d\n", |
1396 | skdev->name, __func__, __LINE__, sgp->dxfer_len); | |
e67f86b3 AB |
1397 | return -EINVAL; |
1398 | } | |
1399 | ||
1400 | switch (sgp->dxfer_direction) { | |
1401 | case SG_DXFER_NONE: | |
1402 | acc = -1; | |
1403 | break; | |
1404 | ||
1405 | case SG_DXFER_TO_DEV: | |
1406 | acc = VERIFY_READ; | |
1407 | break; | |
1408 | ||
1409 | case SG_DXFER_FROM_DEV: | |
1410 | case SG_DXFER_TO_FROM_DEV: | |
1411 | acc = VERIFY_WRITE; | |
1412 | break; | |
1413 | ||
1414 | default: | |
2e44b427 | 1415 | pr_debug("%s:%s:%d dxfer_dir invalid %d\n", |
1416 | skdev->name, __func__, __LINE__, sgp->dxfer_direction); | |
e67f86b3 AB |
1417 | return -EINVAL; |
1418 | } | |
1419 | ||
1420 | if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { | |
2e44b427 | 1421 | pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", |
1422 | skdev->name, __func__, __LINE__, sgp->cmdp); | |
e67f86b3 AB |
1423 | return -EFAULT; |
1424 | } | |
1425 | ||
1426 | if (sgp->mx_sb_len != 0) { | |
1427 | if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { | |
2e44b427 | 1428 | pr_debug("%s:%s:%d access sbp failed %p\n", |
1429 | skdev->name, __func__, __LINE__, sgp->sbp); | |
e67f86b3 AB |
1430 | return -EFAULT; |
1431 | } | |
1432 | } | |
1433 | ||
1434 | if (sgp->iovec_count == 0) { | |
1435 | sksgio->iov[0].iov_base = sgp->dxferp; | |
1436 | sksgio->iov[0].iov_len = sgp->dxfer_len; | |
1437 | sksgio->iovcnt = 1; | |
1438 | sksgio->dxfer_len = sgp->dxfer_len; | |
1439 | } else { | |
1440 | struct sg_iovec *iov; | |
1441 | uint nbytes = sizeof(*iov) * sgp->iovec_count; | |
1442 | size_t iov_data_len; | |
1443 | ||
1444 | iov = kmalloc(nbytes, GFP_KERNEL); | |
1445 | if (iov == NULL) { | |
2e44b427 | 1446 | pr_debug("%s:%s:%d alloc iovec failed %d\n", |
1447 | skdev->name, __func__, __LINE__, | |
1448 | sgp->iovec_count); | |
e67f86b3 AB |
1449 | return -ENOMEM; |
1450 | } | |
1451 | sksgio->iov = iov; | |
1452 | sksgio->iovcnt = sgp->iovec_count; | |
1453 | ||
1454 | if (copy_from_user(iov, sgp->dxferp, nbytes)) { | |
2e44b427 | 1455 | pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", |
1456 | skdev->name, __func__, __LINE__, sgp->dxferp); | |
e67f86b3 AB |
1457 | return -EFAULT; |
1458 | } | |
1459 | ||
1460 | /* | |
1461 | * Sum up the vecs, making sure they don't overflow | |
1462 | */ | |
1463 | iov_data_len = 0; | |
1464 | for (i = 0; i < sgp->iovec_count; i++) { | |
1465 | if (iov_data_len + iov[i].iov_len < iov_data_len) | |
1466 | return -EINVAL; | |
1467 | iov_data_len += iov[i].iov_len; | |
1468 | } | |
1469 | ||
1470 | /* SG_IO howto says that the shorter of the two wins */ | |
1471 | if (sgp->dxfer_len < iov_data_len) { | |
1472 | sksgio->iovcnt = iov_shorten((struct iovec *)iov, | |
1473 | sgp->iovec_count, | |
1474 | sgp->dxfer_len); | |
1475 | sksgio->dxfer_len = sgp->dxfer_len; | |
1476 | } else | |
1477 | sksgio->dxfer_len = iov_data_len; | |
1478 | } | |
1479 | ||
1480 | if (sgp->dxfer_direction != SG_DXFER_NONE) { | |
1481 | struct sg_iovec *iov = sksgio->iov; | |
1482 | for (i = 0; i < sksgio->iovcnt; i++, iov++) { | |
1483 | if (!access_ok(acc, iov->iov_base, iov->iov_len)) { | |
2e44b427 | 1484 | pr_debug("%s:%s:%d access data failed %p/%d\n", |
1485 | skdev->name, __func__, __LINE__, | |
1486 | iov->iov_base, (int)iov->iov_len); | |
e67f86b3 AB |
1487 | return -EFAULT; |
1488 | } | |
1489 | } | |
1490 | } | |
1491 | ||
1492 | return 0; | |
1493 | } | |
1494 | ||
1495 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | |
1496 | struct skd_sg_io *sksgio) | |
1497 | { | |
1498 | struct skd_special_context *skspcl = NULL; | |
1499 | int rc; | |
1500 | ||
38d4a1bb | 1501 | for (;;) { |
e67f86b3 AB |
1502 | ulong flags; |
1503 | ||
1504 | spin_lock_irqsave(&skdev->lock, flags); | |
1505 | skspcl = skdev->skspcl_free_list; | |
1506 | if (skspcl != NULL) { | |
1507 | skdev->skspcl_free_list = | |
1508 | (struct skd_special_context *)skspcl->req.next; | |
1509 | skspcl->req.id += SKD_ID_INCR; | |
1510 | skspcl->req.state = SKD_REQ_STATE_SETUP; | |
1511 | skspcl->orphaned = 0; | |
1512 | skspcl->req.n_sg = 0; | |
1513 | } | |
1514 | spin_unlock_irqrestore(&skdev->lock, flags); | |
1515 | ||
1516 | if (skspcl != NULL) { | |
1517 | rc = 0; | |
1518 | break; | |
1519 | } | |
1520 | ||
2e44b427 | 1521 | pr_debug("%s:%s:%d blocking\n", |
1522 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
1523 | |
1524 | rc = wait_event_interruptible_timeout( | |
1525 | skdev->waitq, | |
1526 | (skdev->skspcl_free_list != NULL), | |
1527 | msecs_to_jiffies(sksgio->sg.timeout)); | |
1528 | ||
2e44b427 | 1529 | pr_debug("%s:%s:%d unblocking, rc=%d\n", |
1530 | skdev->name, __func__, __LINE__, rc); | |
e67f86b3 AB |
1531 | |
1532 | if (rc <= 0) { | |
1533 | if (rc == 0) | |
1534 | rc = -ETIMEDOUT; | |
1535 | else | |
1536 | rc = -EINTR; | |
1537 | break; | |
1538 | } | |
1539 | /* | |
1540 | * If we get here rc > 0 meaning the timeout to | |
1541 | * wait_event_interruptible_timeout() had time left, hence the | |
1542 | * sought event -- non-empty free list -- happened. | |
1543 | * Retry the allocation. | |
1544 | */ | |
1545 | } | |
1546 | sksgio->skspcl = skspcl; | |
1547 | ||
1548 | return rc; | |
1549 | } | |
1550 | ||
1551 | static int skd_skreq_prep_buffering(struct skd_device *skdev, | |
1552 | struct skd_request_context *skreq, | |
1553 | u32 dxfer_len) | |
1554 | { | |
1555 | u32 resid = dxfer_len; | |
1556 | ||
1557 | /* | |
1558 | * The DMA engine must have aligned addresses and byte counts. | |
1559 | */ | |
1560 | resid += (-resid) & 3; | |
1561 | skreq->sg_byte_count = resid; | |
1562 | ||
1563 | skreq->n_sg = 0; | |
1564 | ||
1565 | while (resid > 0) { | |
1566 | u32 nbytes = PAGE_SIZE; | |
1567 | u32 ix = skreq->n_sg; | |
1568 | struct scatterlist *sg = &skreq->sg[ix]; | |
1569 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | |
1570 | struct page *page; | |
1571 | ||
1572 | if (nbytes > resid) | |
1573 | nbytes = resid; | |
1574 | ||
1575 | page = alloc_page(GFP_KERNEL); | |
1576 | if (page == NULL) | |
1577 | return -ENOMEM; | |
1578 | ||
1579 | sg_set_page(sg, page, nbytes, 0); | |
1580 | ||
1581 | /* TODO: This should be going through a pci_???() | |
1582 | * routine to do proper mapping. */ | |
1583 | sksg->control = FIT_SGD_CONTROL_NOT_LAST; | |
1584 | sksg->byte_count = nbytes; | |
1585 | ||
1586 | sksg->host_side_addr = sg_phys(sg); | |
1587 | ||
1588 | sksg->dev_side_addr = 0; | |
1589 | sksg->next_desc_ptr = skreq->sksg_dma_address + | |
1590 | (ix + 1) * sizeof(*sksg); | |
1591 | ||
1592 | skreq->n_sg++; | |
1593 | resid -= nbytes; | |
1594 | } | |
1595 | ||
1596 | if (skreq->n_sg > 0) { | |
1597 | u32 ix = skreq->n_sg - 1; | |
1598 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | |
1599 | ||
1600 | sksg->control = FIT_SGD_CONTROL_LAST; | |
1601 | sksg->next_desc_ptr = 0; | |
1602 | } | |
1603 | ||
1604 | if (unlikely(skdev->dbg_level > 1)) { | |
1605 | u32 i; | |
1606 | ||
2e44b427 | 1607 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", |
1608 | skdev->name, __func__, __LINE__, | |
1609 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | |
e67f86b3 AB |
1610 | for (i = 0; i < skreq->n_sg; i++) { |
1611 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
1612 | ||
2e44b427 | 1613 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " |
1614 | "addr=0x%llx next=0x%llx\n", | |
1615 | skdev->name, __func__, __LINE__, | |
1616 | i, sgd->byte_count, sgd->control, | |
1617 | sgd->host_side_addr, sgd->next_desc_ptr); | |
e67f86b3 AB |
1618 | } |
1619 | } | |
1620 | ||
1621 | return 0; | |
1622 | } | |
1623 | ||
1624 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | |
1625 | struct skd_sg_io *sksgio) | |
1626 | { | |
1627 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1628 | struct skd_request_context *skreq = &skspcl->req; | |
1629 | u32 dxfer_len = sksgio->dxfer_len; | |
1630 | int rc; | |
1631 | ||
1632 | rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); | |
1633 | /* | |
1634 | * Eventually, errors or not, skd_release_special() is called | |
1635 | * to recover allocations including partial allocations. | |
1636 | */ | |
1637 | return rc; | |
1638 | } | |
1639 | ||
1640 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | |
1641 | struct skd_sg_io *sksgio, int dxfer_dir) | |
1642 | { | |
1643 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1644 | u32 iov_ix = 0; | |
1645 | struct sg_iovec curiov; | |
1646 | u32 sksg_ix = 0; | |
1647 | u8 *bufp = NULL; | |
1648 | u32 buf_len = 0; | |
1649 | u32 resid = sksgio->dxfer_len; | |
1650 | int rc; | |
1651 | ||
1652 | curiov.iov_len = 0; | |
1653 | curiov.iov_base = NULL; | |
1654 | ||
1655 | if (dxfer_dir != sksgio->sg.dxfer_direction) { | |
1656 | if (dxfer_dir != SG_DXFER_TO_DEV || | |
1657 | sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) | |
1658 | return 0; | |
1659 | } | |
1660 | ||
1661 | while (resid > 0) { | |
1662 | u32 nbytes = PAGE_SIZE; | |
1663 | ||
1664 | if (curiov.iov_len == 0) { | |
1665 | curiov = sksgio->iov[iov_ix++]; | |
1666 | continue; | |
1667 | } | |
1668 | ||
1669 | if (buf_len == 0) { | |
1670 | struct page *page; | |
1671 | page = sg_page(&skspcl->req.sg[sksg_ix++]); | |
1672 | bufp = page_address(page); | |
1673 | buf_len = PAGE_SIZE; | |
1674 | } | |
1675 | ||
1676 | nbytes = min_t(u32, nbytes, resid); | |
1677 | nbytes = min_t(u32, nbytes, curiov.iov_len); | |
1678 | nbytes = min_t(u32, nbytes, buf_len); | |
1679 | ||
1680 | if (dxfer_dir == SG_DXFER_TO_DEV) | |
1681 | rc = __copy_from_user(bufp, curiov.iov_base, nbytes); | |
1682 | else | |
1683 | rc = __copy_to_user(curiov.iov_base, bufp, nbytes); | |
1684 | ||
1685 | if (rc) | |
1686 | return -EFAULT; | |
1687 | ||
1688 | resid -= nbytes; | |
1689 | curiov.iov_len -= nbytes; | |
1690 | curiov.iov_base += nbytes; | |
1691 | buf_len -= nbytes; | |
1692 | } | |
1693 | ||
1694 | return 0; | |
1695 | } | |
1696 | ||
1697 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | |
1698 | struct skd_sg_io *sksgio) | |
1699 | { | |
1700 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1701 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | |
1702 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | |
1703 | ||
1704 | memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); | |
1705 | ||
1706 | /* Initialize the FIT msg header */ | |
1707 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
1708 | fmh->num_protocol_cmds_coalesced = 1; | |
1709 | ||
1710 | /* Initialize the SCSI request */ | |
1711 | if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) | |
1712 | scsi_req->hdr.sg_list_dma_address = | |
1713 | cpu_to_be64(skspcl->req.sksg_dma_address); | |
1714 | scsi_req->hdr.tag = skspcl->req.id; | |
1715 | scsi_req->hdr.sg_list_len_bytes = | |
1716 | cpu_to_be32(skspcl->req.sg_byte_count); | |
1717 | memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); | |
1718 | ||
1719 | skspcl->req.state = SKD_REQ_STATE_BUSY; | |
1720 | skd_send_special_fitmsg(skdev, skspcl); | |
1721 | ||
1722 | return 0; | |
1723 | } | |
1724 | ||
1725 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) | |
1726 | { | |
1727 | unsigned long flags; | |
1728 | int rc; | |
1729 | ||
1730 | rc = wait_event_interruptible_timeout(skdev->waitq, | |
1731 | (sksgio->skspcl->req.state != | |
1732 | SKD_REQ_STATE_BUSY), | |
1733 | msecs_to_jiffies(sksgio->sg. | |
1734 | timeout)); | |
1735 | ||
1736 | spin_lock_irqsave(&skdev->lock, flags); | |
1737 | ||
1738 | if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { | |
2e44b427 | 1739 | pr_debug("%s:%s:%d skspcl %p aborted\n", |
1740 | skdev->name, __func__, __LINE__, sksgio->skspcl); | |
e67f86b3 AB |
1741 | |
1742 | /* Build check cond, sense and let command finish. */ | |
1743 | /* For a timeout, we must fabricate completion and sense | |
1744 | * data to complete the command */ | |
1745 | sksgio->skspcl->req.completion.status = | |
1746 | SAM_STAT_CHECK_CONDITION; | |
1747 | ||
1748 | memset(&sksgio->skspcl->req.err_info, 0, | |
1749 | sizeof(sksgio->skspcl->req.err_info)); | |
1750 | sksgio->skspcl->req.err_info.type = 0x70; | |
1751 | sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; | |
1752 | sksgio->skspcl->req.err_info.code = 0x44; | |
1753 | sksgio->skspcl->req.err_info.qual = 0; | |
1754 | rc = 0; | |
1755 | } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) | |
1756 | /* No longer on the adapter. We finish. */ | |
1757 | rc = 0; | |
1758 | else { | |
1759 | /* Something's gone wrong. Still busy. Timeout or | |
1760 | * user interrupted (control-C). Mark as an orphan | |
1761 | * so it will be disposed when completed. */ | |
1762 | sksgio->skspcl->orphaned = 1; | |
1763 | sksgio->skspcl = NULL; | |
1764 | if (rc == 0) { | |
2e44b427 | 1765 | pr_debug("%s:%s:%d timed out %p (%u ms)\n", |
1766 | skdev->name, __func__, __LINE__, | |
1767 | sksgio, sksgio->sg.timeout); | |
e67f86b3 AB |
1768 | rc = -ETIMEDOUT; |
1769 | } else { | |
2e44b427 | 1770 | pr_debug("%s:%s:%d cntlc %p\n", |
1771 | skdev->name, __func__, __LINE__, sksgio); | |
e67f86b3 AB |
1772 | rc = -EINTR; |
1773 | } | |
1774 | } | |
1775 | ||
1776 | spin_unlock_irqrestore(&skdev->lock, flags); | |
1777 | ||
1778 | return rc; | |
1779 | } | |
1780 | ||
1781 | static int skd_sg_io_put_status(struct skd_device *skdev, | |
1782 | struct skd_sg_io *sksgio) | |
1783 | { | |
1784 | struct sg_io_hdr *sgp = &sksgio->sg; | |
1785 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1786 | int resid = 0; | |
1787 | ||
1788 | u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); | |
1789 | ||
1790 | sgp->status = skspcl->req.completion.status; | |
1791 | resid = sksgio->dxfer_len - nb; | |
1792 | ||
1793 | sgp->masked_status = sgp->status & STATUS_MASK; | |
1794 | sgp->msg_status = 0; | |
1795 | sgp->host_status = 0; | |
1796 | sgp->driver_status = 0; | |
1797 | sgp->resid = resid; | |
1798 | if (sgp->masked_status || sgp->host_status || sgp->driver_status) | |
1799 | sgp->info |= SG_INFO_CHECK; | |
1800 | ||
2e44b427 | 1801 | pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", |
1802 | skdev->name, __func__, __LINE__, | |
1803 | sgp->status, sgp->masked_status, sgp->resid); | |
e67f86b3 AB |
1804 | |
1805 | if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { | |
1806 | if (sgp->mx_sb_len > 0) { | |
1807 | struct fit_comp_error_info *ei = &skspcl->req.err_info; | |
1808 | u32 nbytes = sizeof(*ei); | |
1809 | ||
1810 | nbytes = min_t(u32, nbytes, sgp->mx_sb_len); | |
1811 | ||
1812 | sgp->sb_len_wr = nbytes; | |
1813 | ||
1814 | if (__copy_to_user(sgp->sbp, ei, nbytes)) { | |
2e44b427 | 1815 | pr_debug("%s:%s:%d copy_to_user sense failed %p\n", |
1816 | skdev->name, __func__, __LINE__, | |
1817 | sgp->sbp); | |
e67f86b3 AB |
1818 | return -EFAULT; |
1819 | } | |
1820 | } | |
1821 | } | |
1822 | ||
1823 | if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { | |
2e44b427 | 1824 | pr_debug("%s:%s:%d copy_to_user sg failed %p\n", |
1825 | skdev->name, __func__, __LINE__, sksgio->argp); | |
e67f86b3 AB |
1826 | return -EFAULT; |
1827 | } | |
1828 | ||
1829 | return 0; | |
1830 | } | |
1831 | ||
1832 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | |
1833 | struct skd_sg_io *sksgio) | |
1834 | { | |
1835 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1836 | ||
1837 | if (skspcl != NULL) { | |
1838 | ulong flags; | |
1839 | ||
1840 | sksgio->skspcl = NULL; | |
1841 | ||
1842 | spin_lock_irqsave(&skdev->lock, flags); | |
1843 | skd_release_special(skdev, skspcl); | |
1844 | spin_unlock_irqrestore(&skdev->lock, flags); | |
1845 | } | |
1846 | ||
1847 | return 0; | |
1848 | } | |
1849 | ||
1850 | /* | |
1851 | ***************************************************************************** | |
1852 | * INTERNAL REQUESTS -- generated by driver itself | |
1853 | ***************************************************************************** | |
1854 | */ | |
1855 | ||
1856 | static int skd_format_internal_skspcl(struct skd_device *skdev) | |
1857 | { | |
1858 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
1859 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | |
1860 | struct fit_msg_hdr *fmh; | |
1861 | uint64_t dma_address; | |
1862 | struct skd_scsi_request *scsi; | |
1863 | ||
1864 | fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; | |
1865 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
1866 | fmh->num_protocol_cmds_coalesced = 1; | |
1867 | ||
1868 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
1869 | memset(scsi, 0, sizeof(*scsi)); | |
1870 | dma_address = skspcl->req.sksg_dma_address; | |
1871 | scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); | |
1872 | sgd->control = FIT_SGD_CONTROL_LAST; | |
1873 | sgd->byte_count = 0; | |
1874 | sgd->host_side_addr = skspcl->db_dma_address; | |
1875 | sgd->dev_side_addr = 0; | |
1876 | sgd->next_desc_ptr = 0LL; | |
1877 | ||
1878 | return 1; | |
1879 | } | |
1880 | ||
1881 | #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES | |
1882 | ||
1883 | static void skd_send_internal_skspcl(struct skd_device *skdev, | |
1884 | struct skd_special_context *skspcl, | |
1885 | u8 opcode) | |
1886 | { | |
1887 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | |
1888 | struct skd_scsi_request *scsi; | |
1889 | unsigned char *buf = skspcl->data_buf; | |
1890 | int i; | |
1891 | ||
1892 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) | |
1893 | /* | |
1894 | * A refresh is already in progress. | |
1895 | * Just wait for it to finish. | |
1896 | */ | |
1897 | return; | |
1898 | ||
1899 | SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); | |
1900 | skspcl->req.state = SKD_REQ_STATE_BUSY; | |
1901 | skspcl->req.id += SKD_ID_INCR; | |
1902 | ||
1903 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
1904 | scsi->hdr.tag = skspcl->req.id; | |
1905 | ||
1906 | memset(scsi->cdb, 0, sizeof(scsi->cdb)); | |
1907 | ||
1908 | switch (opcode) { | |
1909 | case TEST_UNIT_READY: | |
1910 | scsi->cdb[0] = TEST_UNIT_READY; | |
1911 | sgd->byte_count = 0; | |
1912 | scsi->hdr.sg_list_len_bytes = 0; | |
1913 | break; | |
1914 | ||
1915 | case READ_CAPACITY: | |
1916 | scsi->cdb[0] = READ_CAPACITY; | |
1917 | sgd->byte_count = SKD_N_READ_CAP_BYTES; | |
1918 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
1919 | break; | |
1920 | ||
1921 | case INQUIRY: | |
1922 | scsi->cdb[0] = INQUIRY; | |
1923 | scsi->cdb[1] = 0x01; /* evpd */ | |
1924 | scsi->cdb[2] = 0x80; /* serial number page */ | |
1925 | scsi->cdb[4] = 0x10; | |
1926 | sgd->byte_count = 16; | |
1927 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
1928 | break; | |
1929 | ||
1930 | case SYNCHRONIZE_CACHE: | |
1931 | scsi->cdb[0] = SYNCHRONIZE_CACHE; | |
1932 | sgd->byte_count = 0; | |
1933 | scsi->hdr.sg_list_len_bytes = 0; | |
1934 | break; | |
1935 | ||
1936 | case WRITE_BUFFER: | |
1937 | scsi->cdb[0] = WRITE_BUFFER; | |
1938 | scsi->cdb[1] = 0x02; | |
1939 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | |
1940 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | |
1941 | sgd->byte_count = WR_BUF_SIZE; | |
1942 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
1943 | /* fill incrementing byte pattern */ | |
1944 | for (i = 0; i < sgd->byte_count; i++) | |
1945 | buf[i] = i & 0xFF; | |
1946 | break; | |
1947 | ||
1948 | case READ_BUFFER: | |
1949 | scsi->cdb[0] = READ_BUFFER; | |
1950 | scsi->cdb[1] = 0x02; | |
1951 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | |
1952 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | |
1953 | sgd->byte_count = WR_BUF_SIZE; | |
1954 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
1955 | memset(skspcl->data_buf, 0, sgd->byte_count); | |
1956 | break; | |
1957 | ||
1958 | default: | |
1959 | SKD_ASSERT("Don't know what to send"); | |
1960 | return; | |
1961 | ||
1962 | } | |
1963 | skd_send_special_fitmsg(skdev, skspcl); | |
1964 | } | |
1965 | ||
1966 | static void skd_refresh_device_data(struct skd_device *skdev) | |
1967 | { | |
1968 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
1969 | ||
1970 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | |
1971 | } | |
1972 | ||
1973 | static int skd_chk_read_buf(struct skd_device *skdev, | |
1974 | struct skd_special_context *skspcl) | |
1975 | { | |
1976 | unsigned char *buf = skspcl->data_buf; | |
1977 | int i; | |
1978 | ||
1979 | /* check for incrementing byte pattern */ | |
1980 | for (i = 0; i < WR_BUF_SIZE; i++) | |
1981 | if (buf[i] != (i & 0xFF)) | |
1982 | return 1; | |
1983 | ||
1984 | return 0; | |
1985 | } | |
1986 | ||
1987 | static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, | |
1988 | u8 code, u8 qual, u8 fruc) | |
1989 | { | |
1990 | /* If the check condition is of special interest, log a message */ | |
1991 | if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) | |
1992 | && (code == 0x04) && (qual == 0x06)) { | |
1993 | pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" | |
1994 | "ascq/fruc %02x/%02x/%02x/%02x\n", | |
1995 | skd_name(skdev), key, code, qual, fruc); | |
1996 | } | |
1997 | } | |
1998 | ||
1999 | static void skd_complete_internal(struct skd_device *skdev, | |
2000 | volatile struct fit_completion_entry_v1 | |
2001 | *skcomp, | |
2002 | volatile struct fit_comp_error_info *skerr, | |
2003 | struct skd_special_context *skspcl) | |
2004 | { | |
2005 | u8 *buf = skspcl->data_buf; | |
2006 | u8 status; | |
2007 | int i; | |
2008 | struct skd_scsi_request *scsi = | |
2009 | (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
2010 | ||
2011 | SKD_ASSERT(skspcl == &skdev->internal_skspcl); | |
2012 | ||
2e44b427 | 2013 | pr_debug("%s:%s:%d complete internal %x\n", |
2014 | skdev->name, __func__, __LINE__, scsi->cdb[0]); | |
e67f86b3 AB |
2015 | |
2016 | skspcl->req.completion = *skcomp; | |
2017 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
2018 | skspcl->req.id += SKD_ID_INCR; | |
2019 | ||
2020 | status = skspcl->req.completion.status; | |
2021 | ||
2022 | skd_log_check_status(skdev, status, skerr->key, skerr->code, | |
2023 | skerr->qual, skerr->fruc); | |
2024 | ||
2025 | switch (scsi->cdb[0]) { | |
2026 | case TEST_UNIT_READY: | |
2027 | if (status == SAM_STAT_GOOD) | |
2028 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | |
2029 | else if ((status == SAM_STAT_CHECK_CONDITION) && | |
2030 | (skerr->key == MEDIUM_ERROR)) | |
2031 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | |
2032 | else { | |
2033 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2e44b427 | 2034 | pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", |
2035 | skdev->name, __func__, __LINE__, | |
2036 | skdev->state); | |
e67f86b3 AB |
2037 | return; |
2038 | } | |
2e44b427 | 2039 | pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", |
2040 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2041 | skd_send_internal_skspcl(skdev, skspcl, 0x00); |
2042 | } | |
2043 | break; | |
2044 | ||
2045 | case WRITE_BUFFER: | |
2046 | if (status == SAM_STAT_GOOD) | |
2047 | skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); | |
2048 | else { | |
2049 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2e44b427 | 2050 | pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n", |
2051 | skdev->name, __func__, __LINE__, | |
2052 | skdev->state); | |
e67f86b3 AB |
2053 | return; |
2054 | } | |
2e44b427 | 2055 | pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", |
2056 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2057 | skd_send_internal_skspcl(skdev, skspcl, 0x00); |
2058 | } | |
2059 | break; | |
2060 | ||
2061 | case READ_BUFFER: | |
2062 | if (status == SAM_STAT_GOOD) { | |
2063 | if (skd_chk_read_buf(skdev, skspcl) == 0) | |
2064 | skd_send_internal_skspcl(skdev, skspcl, | |
2065 | READ_CAPACITY); | |
2066 | else { | |
2067 | pr_err( | |
2068 | "(%s):*** W/R Buffer mismatch %d ***\n", | |
2069 | skd_name(skdev), skdev->connect_retries); | |
2070 | if (skdev->connect_retries < | |
2071 | SKD_MAX_CONNECT_RETRIES) { | |
2072 | skdev->connect_retries++; | |
2073 | skd_soft_reset(skdev); | |
2074 | } else { | |
2075 | pr_err( | |
2076 | "(%s): W/R Buffer Connect Error\n", | |
2077 | skd_name(skdev)); | |
2078 | return; | |
2079 | } | |
2080 | } | |
2081 | ||
2082 | } else { | |
2083 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2e44b427 | 2084 | pr_debug("%s:%s:%d " |
2085 | "read buffer failed, don't send anymore state 0x%x\n", | |
2086 | skdev->name, __func__, __LINE__, | |
2087 | skdev->state); | |
e67f86b3 AB |
2088 | return; |
2089 | } | |
2e44b427 | 2090 | pr_debug("%s:%s:%d " |
2091 | "**** read buffer failed, retry skerr\n", | |
2092 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2093 | skd_send_internal_skspcl(skdev, skspcl, 0x00); |
2094 | } | |
2095 | break; | |
2096 | ||
2097 | case READ_CAPACITY: | |
2098 | skdev->read_cap_is_valid = 0; | |
2099 | if (status == SAM_STAT_GOOD) { | |
2100 | skdev->read_cap_last_lba = | |
2101 | (buf[0] << 24) | (buf[1] << 16) | | |
2102 | (buf[2] << 8) | buf[3]; | |
2103 | skdev->read_cap_blocksize = | |
2104 | (buf[4] << 24) | (buf[5] << 16) | | |
2105 | (buf[6] << 8) | buf[7]; | |
2106 | ||
2e44b427 | 2107 | pr_debug("%s:%s:%d last lba %d, bs %d\n", |
2108 | skdev->name, __func__, __LINE__, | |
2109 | skdev->read_cap_last_lba, | |
2110 | skdev->read_cap_blocksize); | |
e67f86b3 AB |
2111 | |
2112 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | |
2113 | ||
2114 | skdev->read_cap_is_valid = 1; | |
2115 | ||
2116 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | |
2117 | } else if ((status == SAM_STAT_CHECK_CONDITION) && | |
2118 | (skerr->key == MEDIUM_ERROR)) { | |
2119 | skdev->read_cap_last_lba = ~0; | |
2120 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | |
2e44b427 | 2121 | pr_debug("%s:%s:%d " |
2122 | "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", | |
2123 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2124 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); |
2125 | } else { | |
2e44b427 | 2126 | pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", |
2127 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2128 | skd_send_internal_skspcl(skdev, skspcl, |
2129 | TEST_UNIT_READY); | |
2130 | } | |
2131 | break; | |
2132 | ||
2133 | case INQUIRY: | |
2134 | skdev->inquiry_is_valid = 0; | |
2135 | if (status == SAM_STAT_GOOD) { | |
2136 | skdev->inquiry_is_valid = 1; | |
2137 | ||
2138 | for (i = 0; i < 12; i++) | |
2139 | skdev->inq_serial_num[i] = buf[i + 4]; | |
2140 | skdev->inq_serial_num[12] = 0; | |
2141 | } | |
2142 | ||
2143 | if (skd_unquiesce_dev(skdev) < 0) | |
2e44b427 | 2144 | pr_debug("%s:%s:%d **** failed, to ONLINE device\n", |
2145 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2146 | /* connection is complete */ |
2147 | skdev->connect_retries = 0; | |
2148 | break; | |
2149 | ||
2150 | case SYNCHRONIZE_CACHE: | |
2151 | if (status == SAM_STAT_GOOD) | |
2152 | skdev->sync_done = 1; | |
2153 | else | |
2154 | skdev->sync_done = -1; | |
2155 | wake_up_interruptible(&skdev->waitq); | |
2156 | break; | |
2157 | ||
2158 | default: | |
2159 | SKD_ASSERT("we didn't send this"); | |
2160 | } | |
2161 | } | |
2162 | ||
2163 | /* | |
2164 | ***************************************************************************** | |
2165 | * FIT MESSAGES | |
2166 | ***************************************************************************** | |
2167 | */ | |
2168 | ||
2169 | static void skd_send_fitmsg(struct skd_device *skdev, | |
2170 | struct skd_fitmsg_context *skmsg) | |
2171 | { | |
2172 | u64 qcmd; | |
2173 | struct fit_msg_hdr *fmh; | |
2174 | ||
2e44b427 | 2175 | pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", |
2176 | skdev->name, __func__, __LINE__, | |
2177 | skmsg->mb_dma_address, skdev->in_flight); | |
2178 | pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", | |
2179 | skdev->name, __func__, __LINE__, | |
2180 | skmsg->msg_buf, skmsg->offset); | |
e67f86b3 AB |
2181 | |
2182 | qcmd = skmsg->mb_dma_address; | |
2183 | qcmd |= FIT_QCMD_QID_NORMAL; | |
2184 | ||
2185 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | |
2186 | skmsg->outstanding = fmh->num_protocol_cmds_coalesced; | |
2187 | ||
2188 | if (unlikely(skdev->dbg_level > 1)) { | |
2189 | u8 *bp = (u8 *)skmsg->msg_buf; | |
2190 | int i; | |
2191 | for (i = 0; i < skmsg->length; i += 8) { | |
2e44b427 | 2192 | pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x " |
2193 | "%02x %02x %02x %02x\n", | |
2194 | skdev->name, __func__, __LINE__, | |
2195 | i, bp[i + 0], bp[i + 1], bp[i + 2], | |
2196 | bp[i + 3], bp[i + 4], bp[i + 5], | |
2197 | bp[i + 6], bp[i + 7]); | |
e67f86b3 AB |
2198 | if (i == 0) |
2199 | i = 64 - 8; | |
2200 | } | |
2201 | } | |
2202 | ||
2203 | if (skmsg->length > 256) | |
2204 | qcmd |= FIT_QCMD_MSGSIZE_512; | |
2205 | else if (skmsg->length > 128) | |
2206 | qcmd |= FIT_QCMD_MSGSIZE_256; | |
2207 | else if (skmsg->length > 64) | |
2208 | qcmd |= FIT_QCMD_MSGSIZE_128; | |
2209 | else | |
2210 | /* | |
2211 | * This makes no sense because the FIT msg header is | |
2212 | * 64 bytes. If the msg is only 64 bytes long it has | |
2213 | * no payload. | |
2214 | */ | |
2215 | qcmd |= FIT_QCMD_MSGSIZE_64; | |
2216 | ||
2217 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | |
2218 | ||
2219 | } | |
2220 | ||
2221 | static void skd_send_special_fitmsg(struct skd_device *skdev, | |
2222 | struct skd_special_context *skspcl) | |
2223 | { | |
2224 | u64 qcmd; | |
2225 | ||
2226 | if (unlikely(skdev->dbg_level > 1)) { | |
2227 | u8 *bp = (u8 *)skspcl->msg_buf; | |
2228 | int i; | |
2229 | ||
2230 | for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { | |
2e44b427 | 2231 | pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x " |
2232 | "%02x %02x %02x %02x\n", | |
2233 | skdev->name, __func__, __LINE__, i, | |
2234 | bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], | |
2235 | bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); | |
e67f86b3 AB |
2236 | if (i == 0) |
2237 | i = 64 - 8; | |
2238 | } | |
2239 | ||
2e44b427 | 2240 | pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", |
2241 | skdev->name, __func__, __LINE__, | |
2242 | skspcl, skspcl->req.id, skspcl->req.sksg_list, | |
2243 | skspcl->req.sksg_dma_address); | |
e67f86b3 AB |
2244 | for (i = 0; i < skspcl->req.n_sg; i++) { |
2245 | struct fit_sg_descriptor *sgd = | |
2246 | &skspcl->req.sksg_list[i]; | |
2247 | ||
2e44b427 | 2248 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " |
2249 | "addr=0x%llx next=0x%llx\n", | |
2250 | skdev->name, __func__, __LINE__, | |
2251 | i, sgd->byte_count, sgd->control, | |
2252 | sgd->host_side_addr, sgd->next_desc_ptr); | |
e67f86b3 AB |
2253 | } |
2254 | } | |
2255 | ||
2256 | /* | |
2257 | * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr | |
2258 | * and one 64-byte SSDI command. | |
2259 | */ | |
2260 | qcmd = skspcl->mb_dma_address; | |
2261 | qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; | |
2262 | ||
2263 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | |
2264 | } | |
2265 | ||
2266 | /* | |
2267 | ***************************************************************************** | |
2268 | * COMPLETION QUEUE | |
2269 | ***************************************************************************** | |
2270 | */ | |
2271 | ||
2272 | static void skd_complete_other(struct skd_device *skdev, | |
2273 | volatile struct fit_completion_entry_v1 *skcomp, | |
2274 | volatile struct fit_comp_error_info *skerr); | |
2275 | ||
e67f86b3 AB |
2276 | struct sns_info { |
2277 | u8 type; | |
2278 | u8 stat; | |
2279 | u8 key; | |
2280 | u8 asc; | |
2281 | u8 ascq; | |
2282 | u8 mask; | |
2283 | enum skd_check_status_action action; | |
2284 | }; | |
2285 | ||
2286 | static struct sns_info skd_chkstat_table[] = { | |
2287 | /* Good */ | |
2288 | { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, | |
2289 | SKD_CHECK_STATUS_REPORT_GOOD }, | |
2290 | ||
2291 | /* Smart alerts */ | |
2292 | { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ | |
2293 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2294 | { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ | |
2295 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2296 | { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ | |
2297 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2298 | ||
2299 | /* Retry (with limits) */ | |
2300 | { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ | |
2301 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2302 | { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ | |
2303 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2304 | { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ | |
2305 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2306 | { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ | |
2307 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2308 | ||
2309 | /* Busy (or about to be) */ | |
2310 | { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ | |
2311 | SKD_CHECK_STATUS_BUSY_IMMINENT }, | |
2312 | }; | |
2313 | ||
2314 | /* | |
2315 | * Look up status and sense data to decide how to handle the error | |
2316 | * from the device. | |
2317 | * mask says which fields must match e.g., mask=0x18 means check | |
2318 | * type and stat, ignore key, asc, ascq. | |
2319 | */ | |
2320 | ||
38d4a1bb MS |
2321 | static enum skd_check_status_action |
2322 | skd_check_status(struct skd_device *skdev, | |
2323 | u8 cmp_status, volatile struct fit_comp_error_info *skerr) | |
e67f86b3 AB |
2324 | { |
2325 | int i, n; | |
2326 | ||
2327 | pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", | |
2328 | skd_name(skdev), skerr->key, skerr->code, skerr->qual, | |
2329 | skerr->fruc); | |
2330 | ||
2e44b427 | 2331 | pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", |
2332 | skdev->name, __func__, __LINE__, skerr->type, cmp_status, | |
2333 | skerr->key, skerr->code, skerr->qual, skerr->fruc); | |
e67f86b3 AB |
2334 | |
2335 | /* Does the info match an entry in the good category? */ | |
2336 | n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); | |
2337 | for (i = 0; i < n; i++) { | |
2338 | struct sns_info *sns = &skd_chkstat_table[i]; | |
2339 | ||
2340 | if (sns->mask & 0x10) | |
2341 | if (skerr->type != sns->type) | |
2342 | continue; | |
2343 | ||
2344 | if (sns->mask & 0x08) | |
2345 | if (cmp_status != sns->stat) | |
2346 | continue; | |
2347 | ||
2348 | if (sns->mask & 0x04) | |
2349 | if (skerr->key != sns->key) | |
2350 | continue; | |
2351 | ||
2352 | if (sns->mask & 0x02) | |
2353 | if (skerr->code != sns->asc) | |
2354 | continue; | |
2355 | ||
2356 | if (sns->mask & 0x01) | |
2357 | if (skerr->qual != sns->ascq) | |
2358 | continue; | |
2359 | ||
2360 | if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { | |
2361 | pr_err("(%s): SMART Alert: sense key/asc/ascq " | |
2362 | "%02x/%02x/%02x\n", | |
2363 | skd_name(skdev), skerr->key, | |
2364 | skerr->code, skerr->qual); | |
2365 | } | |
2366 | return sns->action; | |
2367 | } | |
2368 | ||
2369 | /* No other match, so nonzero status means error, | |
2370 | * zero status means good | |
2371 | */ | |
2372 | if (cmp_status) { | |
2e44b427 | 2373 | pr_debug("%s:%s:%d status check: error\n", |
2374 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2375 | return SKD_CHECK_STATUS_REPORT_ERROR; |
2376 | } | |
2377 | ||
2e44b427 | 2378 | pr_debug("%s:%s:%d status check good default\n", |
2379 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2380 | return SKD_CHECK_STATUS_REPORT_GOOD; |
2381 | } | |
2382 | ||
2383 | static void skd_resolve_req_exception(struct skd_device *skdev, | |
2384 | struct skd_request_context *skreq) | |
2385 | { | |
2386 | u8 cmp_status = skreq->completion.status; | |
2387 | ||
2388 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | |
2389 | case SKD_CHECK_STATUS_REPORT_GOOD: | |
2390 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | |
2391 | skd_end_request(skdev, skreq, 0); | |
2392 | break; | |
2393 | ||
2394 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | |
2395 | skd_log_skreq(skdev, skreq, "retry(busy)"); | |
38d4a1bb | 2396 | blk_requeue_request(skdev->queue, skreq->req); |
e67f86b3 AB |
2397 | pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); |
2398 | skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; | |
2399 | skdev->timer_countdown = SKD_TIMER_MINUTES(20); | |
2400 | skd_quiesce_dev(skdev); | |
2401 | break; | |
2402 | ||
2403 | case SKD_CHECK_STATUS_REQUEUE_REQUEST: | |
fcd37eb3 JA |
2404 | if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { |
2405 | skd_log_skreq(skdev, skreq, "retry"); | |
38d4a1bb | 2406 | blk_requeue_request(skdev->queue, skreq->req); |
fcd37eb3 | 2407 | break; |
e67f86b3 AB |
2408 | } |
2409 | /* fall through to report error */ | |
2410 | ||
2411 | case SKD_CHECK_STATUS_REPORT_ERROR: | |
2412 | default: | |
2413 | skd_end_request(skdev, skreq, -EIO); | |
2414 | break; | |
2415 | } | |
2416 | } | |
2417 | ||
e67f86b3 AB |
2418 | /* assume spinlock is already held */ |
2419 | static void skd_release_skreq(struct skd_device *skdev, | |
2420 | struct skd_request_context *skreq) | |
2421 | { | |
2422 | u32 msg_slot; | |
2423 | struct skd_fitmsg_context *skmsg; | |
2424 | ||
2425 | u32 timo_slot; | |
2426 | ||
2427 | /* | |
2428 | * Reclaim the FIT msg buffer if this is | |
2429 | * the first of the requests it carried to | |
2430 | * be completed. The FIT msg buffer used to | |
2431 | * send this request cannot be reused until | |
2432 | * we are sure the s1120 card has copied | |
2433 | * it to its memory. The FIT msg might have | |
2434 | * contained several requests. As soon as | |
2435 | * any of them are completed we know that | |
2436 | * the entire FIT msg was transferred. | |
2437 | * Only the first completed request will | |
2438 | * match the FIT msg buffer id. The FIT | |
2439 | * msg buffer id is immediately updated. | |
2440 | * When subsequent requests complete the FIT | |
2441 | * msg buffer id won't match, so we know | |
2442 | * quite cheaply that it is already done. | |
2443 | */ | |
2444 | msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; | |
2445 | SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); | |
2446 | ||
2447 | skmsg = &skdev->skmsg_table[msg_slot]; | |
2448 | if (skmsg->id == skreq->fitmsg_id) { | |
2449 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); | |
2450 | SKD_ASSERT(skmsg->outstanding > 0); | |
2451 | skmsg->outstanding--; | |
2452 | if (skmsg->outstanding == 0) { | |
2453 | skmsg->state = SKD_MSG_STATE_IDLE; | |
2454 | skmsg->id += SKD_ID_INCR; | |
2455 | skmsg->next = skdev->skmsg_free_list; | |
2456 | skdev->skmsg_free_list = skmsg; | |
2457 | } | |
2458 | } | |
2459 | ||
2460 | /* | |
2461 | * Decrease the number of active requests. | |
2462 | * Also decrements the count in the timeout slot. | |
2463 | */ | |
2464 | SKD_ASSERT(skdev->in_flight > 0); | |
2465 | skdev->in_flight -= 1; | |
2466 | ||
2467 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
2468 | SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); | |
2469 | skdev->timeout_slot[timo_slot] -= 1; | |
2470 | ||
2471 | /* | |
2472 | * Reset backpointer | |
2473 | */ | |
fcd37eb3 | 2474 | skreq->req = NULL; |
e67f86b3 AB |
2475 | |
2476 | /* | |
2477 | * Reclaim the skd_request_context | |
2478 | */ | |
2479 | skreq->state = SKD_REQ_STATE_IDLE; | |
2480 | skreq->id += SKD_ID_INCR; | |
2481 | skreq->next = skdev->skreq_free_list; | |
2482 | skdev->skreq_free_list = skreq; | |
2483 | } | |
2484 | ||
2485 | #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA | |
2486 | ||
2487 | static void skd_do_inq_page_00(struct skd_device *skdev, | |
2488 | volatile struct fit_completion_entry_v1 *skcomp, | |
2489 | volatile struct fit_comp_error_info *skerr, | |
2490 | uint8_t *cdb, uint8_t *buf) | |
2491 | { | |
2492 | uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; | |
2493 | ||
2494 | /* Caller requested "supported pages". The driver needs to insert | |
2495 | * its page. | |
2496 | */ | |
2e44b427 | 2497 | pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", |
2498 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2499 | |
2500 | /* If the device rejected the request because the CDB was | |
2501 | * improperly formed, then just leave. | |
2502 | */ | |
2503 | if (skcomp->status == SAM_STAT_CHECK_CONDITION && | |
2504 | skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) | |
2505 | return; | |
2506 | ||
2507 | /* Get the amount of space the caller allocated */ | |
2508 | max_bytes = (cdb[3] << 8) | cdb[4]; | |
2509 | ||
2510 | /* Get the number of pages actually returned by the device */ | |
2511 | drive_pages = (buf[2] << 8) | buf[3]; | |
2512 | drive_bytes = drive_pages + 4; | |
2513 | new_size = drive_pages + 1; | |
2514 | ||
2515 | /* Supported pages must be in numerical order, so find where | |
2516 | * the driver page needs to be inserted into the list of | |
2517 | * pages returned by the device. | |
2518 | */ | |
2519 | for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { | |
2520 | if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) | |
2521 | return; /* Device using this page code. abort */ | |
2522 | else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) | |
2523 | break; | |
2524 | } | |
2525 | ||
2526 | if (insert_pt < max_bytes) { | |
2527 | uint16_t u; | |
2528 | ||
2529 | /* Shift everything up one byte to make room. */ | |
2530 | for (u = new_size + 3; u > insert_pt; u--) | |
2531 | buf[u] = buf[u - 1]; | |
2532 | buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; | |
2533 | ||
2534 | /* SCSI byte order increment of num_returned_bytes by 1 */ | |
2535 | skcomp->num_returned_bytes = | |
2536 | be32_to_cpu(skcomp->num_returned_bytes) + 1; | |
2537 | skcomp->num_returned_bytes = | |
2538 | be32_to_cpu(skcomp->num_returned_bytes); | |
2539 | } | |
2540 | ||
2541 | /* update page length field to reflect the driver's page too */ | |
2542 | buf[2] = (uint8_t)((new_size >> 8) & 0xFF); | |
2543 | buf[3] = (uint8_t)((new_size >> 0) & 0xFF); | |
2544 | } | |
2545 | ||
2546 | static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) | |
2547 | { | |
2548 | int pcie_reg; | |
2549 | u16 pci_bus_speed; | |
2550 | u8 pci_lanes; | |
2551 | ||
2552 | pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); | |
2553 | if (pcie_reg) { | |
2554 | u16 linksta; | |
2555 | pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); | |
2556 | ||
2557 | pci_bus_speed = linksta & 0xF; | |
2558 | pci_lanes = (linksta & 0x3F0) >> 4; | |
2559 | } else { | |
2560 | *speed = STEC_LINK_UNKNOWN; | |
2561 | *width = 0xFF; | |
2562 | return; | |
2563 | } | |
2564 | ||
2565 | switch (pci_bus_speed) { | |
2566 | case 1: | |
2567 | *speed = STEC_LINK_2_5GTS; | |
2568 | break; | |
2569 | case 2: | |
2570 | *speed = STEC_LINK_5GTS; | |
2571 | break; | |
2572 | case 3: | |
2573 | *speed = STEC_LINK_8GTS; | |
2574 | break; | |
2575 | default: | |
2576 | *speed = STEC_LINK_UNKNOWN; | |
2577 | break; | |
2578 | } | |
2579 | ||
2580 | if (pci_lanes <= 0x20) | |
2581 | *width = pci_lanes; | |
2582 | else | |
2583 | *width = 0xFF; | |
2584 | } | |
2585 | ||
2586 | static void skd_do_inq_page_da(struct skd_device *skdev, | |
2587 | volatile struct fit_completion_entry_v1 *skcomp, | |
2588 | volatile struct fit_comp_error_info *skerr, | |
2589 | uint8_t *cdb, uint8_t *buf) | |
2590 | { | |
fec23f63 | 2591 | struct pci_dev *pdev = skdev->pdev; |
e67f86b3 AB |
2592 | unsigned max_bytes; |
2593 | struct driver_inquiry_data inq; | |
2594 | u16 val; | |
2595 | ||
2e44b427 | 2596 | pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", |
2597 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2598 | |
2599 | memset(&inq, 0, sizeof(inq)); | |
2600 | ||
2601 | inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; | |
2602 | ||
fec23f63 BZ |
2603 | skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); |
2604 | inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); | |
2605 | inq.pcie_device_number = PCI_SLOT(pdev->devfn); | |
2606 | inq.pcie_function_number = PCI_FUNC(pdev->devfn); | |
e67f86b3 | 2607 | |
fec23f63 BZ |
2608 | pci_read_config_word(pdev, PCI_VENDOR_ID, &val); |
2609 | inq.pcie_vendor_id = cpu_to_be16(val); | |
e67f86b3 | 2610 | |
fec23f63 BZ |
2611 | pci_read_config_word(pdev, PCI_DEVICE_ID, &val); |
2612 | inq.pcie_device_id = cpu_to_be16(val); | |
e67f86b3 | 2613 | |
fec23f63 BZ |
2614 | pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); |
2615 | inq.pcie_subsystem_vendor_id = cpu_to_be16(val); | |
e67f86b3 | 2616 | |
fec23f63 BZ |
2617 | pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); |
2618 | inq.pcie_subsystem_device_id = cpu_to_be16(val); | |
e67f86b3 AB |
2619 | |
2620 | /* Driver version, fixed lenth, padded with spaces on the right */ | |
2621 | inq.driver_version_length = sizeof(inq.driver_version); | |
2622 | memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); | |
2623 | memcpy(inq.driver_version, DRV_VER_COMPL, | |
2624 | min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); | |
2625 | ||
2626 | inq.page_length = cpu_to_be16((sizeof(inq) - 4)); | |
2627 | ||
2628 | /* Clear the error set by the device */ | |
2629 | skcomp->status = SAM_STAT_GOOD; | |
2630 | memset((void *)skerr, 0, sizeof(*skerr)); | |
2631 | ||
2632 | /* copy response into output buffer */ | |
2633 | max_bytes = (cdb[3] << 8) | cdb[4]; | |
2634 | memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); | |
2635 | ||
2636 | skcomp->num_returned_bytes = | |
2637 | be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq))); | |
2638 | } | |
2639 | ||
2640 | static void skd_do_driver_inq(struct skd_device *skdev, | |
2641 | volatile struct fit_completion_entry_v1 *skcomp, | |
2642 | volatile struct fit_comp_error_info *skerr, | |
2643 | uint8_t *cdb, uint8_t *buf) | |
2644 | { | |
2645 | if (!buf) | |
2646 | return; | |
2647 | else if (cdb[0] != INQUIRY) | |
2648 | return; /* Not an INQUIRY */ | |
2649 | else if ((cdb[1] & 1) == 0) | |
2650 | return; /* EVPD not set */ | |
2651 | else if (cdb[2] == 0) | |
2652 | /* Need to add driver's page to supported pages list */ | |
2653 | skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); | |
2654 | else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) | |
2655 | /* Caller requested driver's page */ | |
2656 | skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); | |
2657 | } | |
2658 | ||
2659 | static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) | |
2660 | { | |
2661 | if (!sg) | |
2662 | return NULL; | |
2663 | if (!sg_page(sg)) | |
2664 | return NULL; | |
2665 | return sg_virt(sg); | |
2666 | } | |
2667 | ||
2668 | static void skd_process_scsi_inq(struct skd_device *skdev, | |
2669 | volatile struct fit_completion_entry_v1 | |
2670 | *skcomp, | |
2671 | volatile struct fit_comp_error_info *skerr, | |
2672 | struct skd_special_context *skspcl) | |
2673 | { | |
2674 | uint8_t *buf; | |
2675 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | |
2676 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | |
2677 | ||
2678 | dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, | |
2679 | skspcl->req.sg_data_dir); | |
2680 | buf = skd_sg_1st_page_ptr(skspcl->req.sg); | |
2681 | ||
2682 | if (buf) | |
2683 | skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); | |
2684 | } | |
2685 | ||
2686 | ||
2687 | static int skd_isr_completion_posted(struct skd_device *skdev, | |
2688 | int limit, int *enqueued) | |
2689 | { | |
2690 | volatile struct fit_completion_entry_v1 *skcmp = NULL; | |
2691 | volatile struct fit_comp_error_info *skerr; | |
2692 | u16 req_id; | |
2693 | u32 req_slot; | |
2694 | struct skd_request_context *skreq; | |
2695 | u16 cmp_cntxt = 0; | |
2696 | u8 cmp_status = 0; | |
2697 | u8 cmp_cycle = 0; | |
2698 | u32 cmp_bytes = 0; | |
2699 | int rc = 0; | |
2700 | int processed = 0; | |
e67f86b3 AB |
2701 | |
2702 | for (;; ) { | |
2703 | SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); | |
2704 | ||
2705 | skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; | |
2706 | cmp_cycle = skcmp->cycle; | |
2707 | cmp_cntxt = skcmp->tag; | |
2708 | cmp_status = skcmp->status; | |
2709 | cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); | |
2710 | ||
2711 | skerr = &skdev->skerr_table[skdev->skcomp_ix]; | |
2712 | ||
2e44b427 | 2713 | pr_debug("%s:%s:%d " |
2714 | "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " | |
2715 | "busy=%d rbytes=0x%x proto=%d\n", | |
2716 | skdev->name, __func__, __LINE__, skdev->skcomp_cycle, | |
2717 | skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, | |
2718 | skdev->in_flight, cmp_bytes, skdev->proto_ver); | |
e67f86b3 AB |
2719 | |
2720 | if (cmp_cycle != skdev->skcomp_cycle) { | |
2e44b427 | 2721 | pr_debug("%s:%s:%d end of completions\n", |
2722 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2723 | break; |
2724 | } | |
2725 | /* | |
2726 | * Update the completion queue head index and possibly | |
2727 | * the completion cycle count. 8-bit wrap-around. | |
2728 | */ | |
2729 | skdev->skcomp_ix++; | |
2730 | if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { | |
2731 | skdev->skcomp_ix = 0; | |
2732 | skdev->skcomp_cycle++; | |
2733 | } | |
2734 | ||
2735 | /* | |
2736 | * The command context is a unique 32-bit ID. The low order | |
2737 | * bits help locate the request. The request is usually a | |
2738 | * r/w request (see skd_start() above) or a special request. | |
2739 | */ | |
2740 | req_id = cmp_cntxt; | |
2741 | req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; | |
2742 | ||
2743 | /* Is this other than a r/w request? */ | |
2744 | if (req_slot >= skdev->num_req_context) { | |
2745 | /* | |
2746 | * This is not a completion for a r/w request. | |
2747 | */ | |
2748 | skd_complete_other(skdev, skcmp, skerr); | |
2749 | continue; | |
2750 | } | |
2751 | ||
2752 | skreq = &skdev->skreq_table[req_slot]; | |
2753 | ||
2754 | /* | |
2755 | * Make sure the request ID for the slot matches. | |
2756 | */ | |
2757 | if (skreq->id != req_id) { | |
2e44b427 | 2758 | pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", |
2759 | skdev->name, __func__, __LINE__, | |
2760 | req_id, skreq->id); | |
e67f86b3 AB |
2761 | { |
2762 | u16 new_id = cmp_cntxt; | |
2763 | pr_err("(%s): Completion mismatch " | |
2764 | "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", | |
2765 | skd_name(skdev), req_id, | |
2766 | skreq->id, new_id); | |
2767 | ||
2768 | continue; | |
2769 | } | |
2770 | } | |
2771 | ||
2772 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); | |
2773 | ||
2774 | if (skreq->state == SKD_REQ_STATE_ABORTED) { | |
2e44b427 | 2775 | pr_debug("%s:%s:%d reclaim req %p id=%04x\n", |
2776 | skdev->name, __func__, __LINE__, | |
2777 | skreq, skreq->id); | |
e67f86b3 AB |
2778 | /* a previously timed out command can |
2779 | * now be cleaned up */ | |
2780 | skd_release_skreq(skdev, skreq); | |
2781 | continue; | |
2782 | } | |
2783 | ||
2784 | skreq->completion = *skcmp; | |
2785 | if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { | |
2786 | skreq->err_info = *skerr; | |
2787 | skd_log_check_status(skdev, cmp_status, skerr->key, | |
2788 | skerr->code, skerr->qual, | |
2789 | skerr->fruc); | |
2790 | } | |
2791 | /* Release DMA resources for the request. */ | |
2792 | if (skreq->n_sg > 0) | |
2793 | skd_postop_sg_list(skdev, skreq); | |
2794 | ||
fcd37eb3 | 2795 | if (!skreq->req) { |
2e44b427 | 2796 | pr_debug("%s:%s:%d NULL backptr skdreq %p, " |
2797 | "req=0x%x req_id=0x%x\n", | |
2798 | skdev->name, __func__, __LINE__, | |
2799 | skreq, skreq->id, req_id); | |
e67f86b3 AB |
2800 | } else { |
2801 | /* | |
2802 | * Capture the outcome and post it back to the | |
2803 | * native request. | |
2804 | */ | |
fcd37eb3 JA |
2805 | if (likely(cmp_status == SAM_STAT_GOOD)) |
2806 | skd_end_request(skdev, skreq, 0); | |
2807 | else | |
e67f86b3 | 2808 | skd_resolve_req_exception(skdev, skreq); |
e67f86b3 AB |
2809 | } |
2810 | ||
2811 | /* | |
2812 | * Release the skreq, its FIT msg (if one), timeout slot, | |
2813 | * and queue depth. | |
2814 | */ | |
2815 | skd_release_skreq(skdev, skreq); | |
2816 | ||
2817 | /* skd_isr_comp_limit equal zero means no limit */ | |
2818 | if (limit) { | |
2819 | if (++processed >= limit) { | |
2820 | rc = 1; | |
2821 | break; | |
2822 | } | |
2823 | } | |
2824 | } | |
2825 | ||
2826 | if ((skdev->state == SKD_DRVR_STATE_PAUSING) | |
2827 | && (skdev->in_flight) == 0) { | |
2828 | skdev->state = SKD_DRVR_STATE_PAUSED; | |
2829 | wake_up_interruptible(&skdev->waitq); | |
2830 | } | |
2831 | ||
2832 | return rc; | |
2833 | } | |
2834 | ||
2835 | static void skd_complete_other(struct skd_device *skdev, | |
2836 | volatile struct fit_completion_entry_v1 *skcomp, | |
2837 | volatile struct fit_comp_error_info *skerr) | |
2838 | { | |
2839 | u32 req_id = 0; | |
2840 | u32 req_table; | |
2841 | u32 req_slot; | |
2842 | struct skd_special_context *skspcl; | |
2843 | ||
2844 | req_id = skcomp->tag; | |
2845 | req_table = req_id & SKD_ID_TABLE_MASK; | |
2846 | req_slot = req_id & SKD_ID_SLOT_MASK; | |
2847 | ||
2e44b427 | 2848 | pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", |
2849 | skdev->name, __func__, __LINE__, | |
2850 | req_table, req_id, req_slot); | |
e67f86b3 AB |
2851 | |
2852 | /* | |
2853 | * Based on the request id, determine how to dispatch this completion. | |
2854 | * This swich/case is finding the good cases and forwarding the | |
2855 | * completion entry. Errors are reported below the switch. | |
2856 | */ | |
2857 | switch (req_table) { | |
2858 | case SKD_ID_RW_REQUEST: | |
2859 | /* | |
2860 | * The caller, skd_completion_posted_isr() above, | |
2861 | * handles r/w requests. The only way we get here | |
2862 | * is if the req_slot is out of bounds. | |
2863 | */ | |
2864 | break; | |
2865 | ||
2866 | case SKD_ID_SPECIAL_REQUEST: | |
2867 | /* | |
2868 | * Make sure the req_slot is in bounds and that the id | |
2869 | * matches. | |
2870 | */ | |
2871 | if (req_slot < skdev->n_special) { | |
2872 | skspcl = &skdev->skspcl_table[req_slot]; | |
2873 | if (skspcl->req.id == req_id && | |
2874 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
2875 | skd_complete_special(skdev, | |
2876 | skcomp, skerr, skspcl); | |
2877 | return; | |
2878 | } | |
2879 | } | |
2880 | break; | |
2881 | ||
2882 | case SKD_ID_INTERNAL: | |
2883 | if (req_slot == 0) { | |
2884 | skspcl = &skdev->internal_skspcl; | |
2885 | if (skspcl->req.id == req_id && | |
2886 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
2887 | skd_complete_internal(skdev, | |
2888 | skcomp, skerr, skspcl); | |
2889 | return; | |
2890 | } | |
2891 | } | |
2892 | break; | |
2893 | ||
2894 | case SKD_ID_FIT_MSG: | |
2895 | /* | |
2896 | * These id's should never appear in a completion record. | |
2897 | */ | |
2898 | break; | |
2899 | ||
2900 | default: | |
2901 | /* | |
2902 | * These id's should never appear anywhere; | |
2903 | */ | |
2904 | break; | |
2905 | } | |
2906 | ||
2907 | /* | |
2908 | * If we get here it is a bad or stale id. | |
2909 | */ | |
2910 | } | |
2911 | ||
2912 | static void skd_complete_special(struct skd_device *skdev, | |
2913 | volatile struct fit_completion_entry_v1 | |
2914 | *skcomp, | |
2915 | volatile struct fit_comp_error_info *skerr, | |
2916 | struct skd_special_context *skspcl) | |
2917 | { | |
2e44b427 | 2918 | pr_debug("%s:%s:%d completing special request %p\n", |
2919 | skdev->name, __func__, __LINE__, skspcl); | |
e67f86b3 AB |
2920 | if (skspcl->orphaned) { |
2921 | /* Discard orphaned request */ | |
2922 | /* ?: Can this release directly or does it need | |
2923 | * to use a worker? */ | |
2e44b427 | 2924 | pr_debug("%s:%s:%d release orphaned %p\n", |
2925 | skdev->name, __func__, __LINE__, skspcl); | |
e67f86b3 AB |
2926 | skd_release_special(skdev, skspcl); |
2927 | return; | |
2928 | } | |
2929 | ||
2930 | skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); | |
2931 | ||
2932 | skspcl->req.state = SKD_REQ_STATE_COMPLETED; | |
2933 | skspcl->req.completion = *skcomp; | |
2934 | skspcl->req.err_info = *skerr; | |
2935 | ||
2936 | skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, | |
2937 | skerr->code, skerr->qual, skerr->fruc); | |
2938 | ||
2939 | wake_up_interruptible(&skdev->waitq); | |
2940 | } | |
2941 | ||
2942 | /* assume spinlock is already held */ | |
2943 | static void skd_release_special(struct skd_device *skdev, | |
2944 | struct skd_special_context *skspcl) | |
2945 | { | |
2946 | int i, was_depleted; | |
2947 | ||
2948 | for (i = 0; i < skspcl->req.n_sg; i++) { | |
e67f86b3 AB |
2949 | struct page *page = sg_page(&skspcl->req.sg[i]); |
2950 | __free_page(page); | |
2951 | } | |
2952 | ||
2953 | was_depleted = (skdev->skspcl_free_list == NULL); | |
2954 | ||
2955 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
2956 | skspcl->req.id += SKD_ID_INCR; | |
2957 | skspcl->req.next = | |
2958 | (struct skd_request_context *)skdev->skspcl_free_list; | |
2959 | skdev->skspcl_free_list = (struct skd_special_context *)skspcl; | |
2960 | ||
2961 | if (was_depleted) { | |
2e44b427 | 2962 | pr_debug("%s:%s:%d skspcl was depleted\n", |
2963 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
2964 | /* Free list was depleted. Their might be waiters. */ |
2965 | wake_up_interruptible(&skdev->waitq); | |
2966 | } | |
2967 | } | |
2968 | ||
2969 | static void skd_reset_skcomp(struct skd_device *skdev) | |
2970 | { | |
2971 | u32 nbytes; | |
2972 | struct fit_completion_entry_v1 *skcomp; | |
2973 | ||
2974 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | |
2975 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | |
2976 | ||
2977 | memset(skdev->skcomp_table, 0, nbytes); | |
2978 | ||
2979 | skdev->skcomp_ix = 0; | |
2980 | skdev->skcomp_cycle = 1; | |
2981 | } | |
2982 | ||
2983 | /* | |
2984 | ***************************************************************************** | |
2985 | * INTERRUPTS | |
2986 | ***************************************************************************** | |
2987 | */ | |
2988 | static void skd_completion_worker(struct work_struct *work) | |
2989 | { | |
2990 | struct skd_device *skdev = | |
2991 | container_of(work, struct skd_device, completion_worker); | |
2992 | unsigned long flags; | |
2993 | int flush_enqueued = 0; | |
2994 | ||
2995 | spin_lock_irqsave(&skdev->lock, flags); | |
2996 | ||
2997 | /* | |
2998 | * pass in limit=0, which means no limit.. | |
2999 | * process everything in compq | |
3000 | */ | |
3001 | skd_isr_completion_posted(skdev, 0, &flush_enqueued); | |
3002 | skd_request_fn(skdev->queue); | |
3003 | ||
3004 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3005 | } | |
3006 | ||
3007 | static void skd_isr_msg_from_dev(struct skd_device *skdev); | |
3008 | ||
3009 | irqreturn_t | |
3010 | static skd_isr(int irq, void *ptr) | |
3011 | { | |
3012 | struct skd_device *skdev; | |
3013 | u32 intstat; | |
3014 | u32 ack; | |
3015 | int rc = 0; | |
3016 | int deferred = 0; | |
3017 | int flush_enqueued = 0; | |
3018 | ||
3019 | skdev = (struct skd_device *)ptr; | |
3020 | spin_lock(&skdev->lock); | |
3021 | ||
3022 | for (;; ) { | |
3023 | intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
3024 | ||
3025 | ack = FIT_INT_DEF_MASK; | |
3026 | ack &= intstat; | |
3027 | ||
2e44b427 | 3028 | pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", |
3029 | skdev->name, __func__, __LINE__, intstat, ack); | |
e67f86b3 AB |
3030 | |
3031 | /* As long as there is an int pending on device, keep | |
3032 | * running loop. When none, get out, but if we've never | |
3033 | * done any processing, call completion handler? | |
3034 | */ | |
3035 | if (ack == 0) { | |
3036 | /* No interrupts on device, but run the completion | |
3037 | * processor anyway? | |
3038 | */ | |
3039 | if (rc == 0) | |
3040 | if (likely (skdev->state | |
3041 | == SKD_DRVR_STATE_ONLINE)) | |
3042 | deferred = 1; | |
3043 | break; | |
3044 | } | |
3045 | ||
3046 | rc = IRQ_HANDLED; | |
3047 | ||
3048 | SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); | |
3049 | ||
3050 | if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && | |
3051 | (skdev->state != SKD_DRVR_STATE_STOPPING))) { | |
3052 | if (intstat & FIT_ISH_COMPLETION_POSTED) { | |
3053 | /* | |
3054 | * If we have already deferred completion | |
3055 | * processing, don't bother running it again | |
3056 | */ | |
3057 | if (deferred == 0) | |
3058 | deferred = | |
3059 | skd_isr_completion_posted(skdev, | |
3060 | skd_isr_comp_limit, &flush_enqueued); | |
3061 | } | |
3062 | ||
3063 | if (intstat & FIT_ISH_FW_STATE_CHANGE) { | |
3064 | skd_isr_fwstate(skdev); | |
3065 | if (skdev->state == SKD_DRVR_STATE_FAULT || | |
3066 | skdev->state == | |
3067 | SKD_DRVR_STATE_DISAPPEARED) { | |
3068 | spin_unlock(&skdev->lock); | |
3069 | return rc; | |
3070 | } | |
3071 | } | |
3072 | ||
3073 | if (intstat & FIT_ISH_MSG_FROM_DEV) | |
3074 | skd_isr_msg_from_dev(skdev); | |
3075 | } | |
3076 | } | |
3077 | ||
3078 | if (unlikely(flush_enqueued)) | |
3079 | skd_request_fn(skdev->queue); | |
3080 | ||
3081 | if (deferred) | |
3082 | schedule_work(&skdev->completion_worker); | |
3083 | else if (!flush_enqueued) | |
3084 | skd_request_fn(skdev->queue); | |
3085 | ||
3086 | spin_unlock(&skdev->lock); | |
3087 | ||
3088 | return rc; | |
3089 | } | |
3090 | ||
e67f86b3 AB |
3091 | static void skd_drive_fault(struct skd_device *skdev) |
3092 | { | |
3093 | skdev->state = SKD_DRVR_STATE_FAULT; | |
3094 | pr_err("(%s): Drive FAULT\n", skd_name(skdev)); | |
3095 | } | |
3096 | ||
3097 | static void skd_drive_disappeared(struct skd_device *skdev) | |
3098 | { | |
3099 | skdev->state = SKD_DRVR_STATE_DISAPPEARED; | |
3100 | pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); | |
3101 | } | |
3102 | ||
3103 | static void skd_isr_fwstate(struct skd_device *skdev) | |
3104 | { | |
3105 | u32 sense; | |
3106 | u32 state; | |
3107 | u32 mtd; | |
3108 | int prev_driver_state = skdev->state; | |
3109 | ||
3110 | sense = SKD_READL(skdev, FIT_STATUS); | |
3111 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
3112 | ||
3113 | pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", | |
3114 | skd_name(skdev), | |
3115 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | |
3116 | skd_drive_state_to_str(state), state); | |
3117 | ||
3118 | skdev->drive_state = state; | |
3119 | ||
3120 | switch (skdev->drive_state) { | |
3121 | case FIT_SR_DRIVE_INIT: | |
3122 | if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { | |
3123 | skd_disable_interrupts(skdev); | |
3124 | break; | |
3125 | } | |
3126 | if (skdev->state == SKD_DRVR_STATE_RESTARTING) | |
3127 | skd_recover_requests(skdev, 0); | |
3128 | if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { | |
3129 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
3130 | skdev->state = SKD_DRVR_STATE_STARTING; | |
3131 | skd_soft_reset(skdev); | |
3132 | break; | |
3133 | } | |
3134 | mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); | |
3135 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3136 | skdev->last_mtd = mtd; | |
3137 | break; | |
3138 | ||
3139 | case FIT_SR_DRIVE_ONLINE: | |
3140 | skdev->cur_max_queue_depth = skd_max_queue_depth; | |
3141 | if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) | |
3142 | skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; | |
3143 | ||
3144 | skdev->queue_low_water_mark = | |
3145 | skdev->cur_max_queue_depth * 2 / 3 + 1; | |
3146 | if (skdev->queue_low_water_mark < 1) | |
3147 | skdev->queue_low_water_mark = 1; | |
3148 | pr_info( | |
3149 | "(%s): Queue depth limit=%d dev=%d lowat=%d\n", | |
3150 | skd_name(skdev), | |
3151 | skdev->cur_max_queue_depth, | |
3152 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | |
3153 | ||
3154 | skd_refresh_device_data(skdev); | |
3155 | break; | |
3156 | ||
3157 | case FIT_SR_DRIVE_BUSY: | |
3158 | skdev->state = SKD_DRVR_STATE_BUSY; | |
3159 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
3160 | skd_quiesce_dev(skdev); | |
3161 | break; | |
3162 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
3163 | /* set timer for 3 seconds, we'll abort any unfinished | |
3164 | * commands after that expires | |
3165 | */ | |
3166 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | |
3167 | skdev->timer_countdown = SKD_TIMER_SECONDS(3); | |
6a5ec65b | 3168 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3169 | break; |
3170 | case FIT_SR_DRIVE_BUSY_ERASE: | |
3171 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | |
3172 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
3173 | break; | |
3174 | case FIT_SR_DRIVE_OFFLINE: | |
3175 | skdev->state = SKD_DRVR_STATE_IDLE; | |
3176 | break; | |
3177 | case FIT_SR_DRIVE_SOFT_RESET: | |
3178 | switch (skdev->state) { | |
3179 | case SKD_DRVR_STATE_STARTING: | |
3180 | case SKD_DRVR_STATE_RESTARTING: | |
3181 | /* Expected by a caller of skd_soft_reset() */ | |
3182 | break; | |
3183 | default: | |
3184 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
3185 | break; | |
3186 | } | |
3187 | break; | |
3188 | case FIT_SR_DRIVE_FW_BOOTING: | |
2e44b427 | 3189 | pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", |
3190 | skdev->name, __func__, __LINE__, skdev->name); | |
e67f86b3 AB |
3191 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; |
3192 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
3193 | break; | |
3194 | ||
3195 | case FIT_SR_DRIVE_DEGRADED: | |
3196 | case FIT_SR_PCIE_LINK_DOWN: | |
3197 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | |
3198 | break; | |
3199 | ||
3200 | case FIT_SR_DRIVE_FAULT: | |
3201 | skd_drive_fault(skdev); | |
3202 | skd_recover_requests(skdev, 0); | |
6a5ec65b | 3203 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3204 | break; |
3205 | ||
3206 | /* PCIe bus returned all Fs? */ | |
3207 | case 0xFF: | |
3208 | pr_info("(%s): state=0x%x sense=0x%x\n", | |
3209 | skd_name(skdev), state, sense); | |
3210 | skd_drive_disappeared(skdev); | |
3211 | skd_recover_requests(skdev, 0); | |
6a5ec65b | 3212 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3213 | break; |
3214 | default: | |
3215 | /* | |
3216 | * Uknown FW State. Wait for a state we recognize. | |
3217 | */ | |
3218 | break; | |
3219 | } | |
3220 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | |
3221 | skd_name(skdev), | |
3222 | skd_skdev_state_to_str(prev_driver_state), prev_driver_state, | |
3223 | skd_skdev_state_to_str(skdev->state), skdev->state); | |
3224 | } | |
3225 | ||
3226 | static void skd_recover_requests(struct skd_device *skdev, int requeue) | |
3227 | { | |
3228 | int i; | |
3229 | ||
3230 | for (i = 0; i < skdev->num_req_context; i++) { | |
3231 | struct skd_request_context *skreq = &skdev->skreq_table[i]; | |
3232 | ||
3233 | if (skreq->state == SKD_REQ_STATE_BUSY) { | |
3234 | skd_log_skreq(skdev, skreq, "recover"); | |
3235 | ||
3236 | SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); | |
fcd37eb3 | 3237 | SKD_ASSERT(skreq->req != NULL); |
e67f86b3 AB |
3238 | |
3239 | /* Release DMA resources for the request. */ | |
3240 | if (skreq->n_sg > 0) | |
3241 | skd_postop_sg_list(skdev, skreq); | |
3242 | ||
fcd37eb3 JA |
3243 | if (requeue && |
3244 | (unsigned long) ++skreq->req->special < | |
3245 | SKD_MAX_RETRIES) | |
38d4a1bb | 3246 | blk_requeue_request(skdev->queue, skreq->req); |
fcd37eb3 | 3247 | else |
e67f86b3 AB |
3248 | skd_end_request(skdev, skreq, -EIO); |
3249 | ||
fcd37eb3 | 3250 | skreq->req = NULL; |
e67f86b3 AB |
3251 | |
3252 | skreq->state = SKD_REQ_STATE_IDLE; | |
3253 | skreq->id += SKD_ID_INCR; | |
e67f86b3 AB |
3254 | } |
3255 | if (i > 0) | |
3256 | skreq[-1].next = skreq; | |
3257 | skreq->next = NULL; | |
3258 | } | |
3259 | skdev->skreq_free_list = skdev->skreq_table; | |
3260 | ||
3261 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
3262 | struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; | |
3263 | ||
3264 | if (skmsg->state == SKD_MSG_STATE_BUSY) { | |
3265 | skd_log_skmsg(skdev, skmsg, "salvaged"); | |
3266 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); | |
3267 | skmsg->state = SKD_MSG_STATE_IDLE; | |
3268 | skmsg->id += SKD_ID_INCR; | |
3269 | } | |
3270 | if (i > 0) | |
3271 | skmsg[-1].next = skmsg; | |
3272 | skmsg->next = NULL; | |
3273 | } | |
3274 | skdev->skmsg_free_list = skdev->skmsg_table; | |
3275 | ||
3276 | for (i = 0; i < skdev->n_special; i++) { | |
3277 | struct skd_special_context *skspcl = &skdev->skspcl_table[i]; | |
3278 | ||
3279 | /* If orphaned, reclaim it because it has already been reported | |
3280 | * to the process as an error (it was just waiting for | |
3281 | * a completion that didn't come, and now it will never come) | |
3282 | * If busy, change to a state that will cause it to error | |
3283 | * out in the wait routine and let it do the normal | |
3284 | * reporting and reclaiming | |
3285 | */ | |
3286 | if (skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
3287 | if (skspcl->orphaned) { | |
2e44b427 | 3288 | pr_debug("%s:%s:%d orphaned %p\n", |
3289 | skdev->name, __func__, __LINE__, | |
3290 | skspcl); | |
e67f86b3 AB |
3291 | skd_release_special(skdev, skspcl); |
3292 | } else { | |
2e44b427 | 3293 | pr_debug("%s:%s:%d not orphaned %p\n", |
3294 | skdev->name, __func__, __LINE__, | |
3295 | skspcl); | |
e67f86b3 AB |
3296 | skspcl->req.state = SKD_REQ_STATE_ABORTED; |
3297 | } | |
3298 | } | |
3299 | } | |
3300 | skdev->skspcl_free_list = skdev->skspcl_table; | |
3301 | ||
3302 | for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) | |
3303 | skdev->timeout_slot[i] = 0; | |
3304 | ||
3305 | skdev->in_flight = 0; | |
3306 | } | |
3307 | ||
3308 | static void skd_isr_msg_from_dev(struct skd_device *skdev) | |
3309 | { | |
3310 | u32 mfd; | |
3311 | u32 mtd; | |
3312 | u32 data; | |
3313 | ||
3314 | mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
3315 | ||
2e44b427 | 3316 | pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", |
3317 | skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); | |
e67f86b3 AB |
3318 | |
3319 | /* ignore any mtd that is an ack for something we didn't send */ | |
3320 | if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) | |
3321 | return; | |
3322 | ||
3323 | switch (FIT_MXD_TYPE(mfd)) { | |
3324 | case FIT_MTD_FITFW_INIT: | |
3325 | skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); | |
3326 | ||
3327 | if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { | |
3328 | pr_err("(%s): protocol mismatch\n", | |
3329 | skdev->name); | |
3330 | pr_err("(%s): got=%d support=%d\n", | |
3331 | skdev->name, skdev->proto_ver, | |
3332 | FIT_PROTOCOL_VERSION_1); | |
3333 | pr_err("(%s): please upgrade driver\n", | |
3334 | skdev->name); | |
3335 | skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; | |
3336 | skd_soft_reset(skdev); | |
3337 | break; | |
3338 | } | |
3339 | mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); | |
3340 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3341 | skdev->last_mtd = mtd; | |
3342 | break; | |
3343 | ||
3344 | case FIT_MTD_GET_CMDQ_DEPTH: | |
3345 | skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); | |
3346 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, | |
3347 | SKD_N_COMPLETION_ENTRY); | |
3348 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3349 | skdev->last_mtd = mtd; | |
3350 | break; | |
3351 | ||
3352 | case FIT_MTD_SET_COMPQ_DEPTH: | |
3353 | SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); | |
3354 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); | |
3355 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3356 | skdev->last_mtd = mtd; | |
3357 | break; | |
3358 | ||
3359 | case FIT_MTD_SET_COMPQ_ADDR: | |
3360 | skd_reset_skcomp(skdev); | |
3361 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); | |
3362 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3363 | skdev->last_mtd = mtd; | |
3364 | break; | |
3365 | ||
3366 | case FIT_MTD_CMD_LOG_HOST_ID: | |
3367 | skdev->connect_time_stamp = get_seconds(); | |
3368 | data = skdev->connect_time_stamp & 0xFFFF; | |
3369 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); | |
3370 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3371 | skdev->last_mtd = mtd; | |
3372 | break; | |
3373 | ||
3374 | case FIT_MTD_CMD_LOG_TIME_STAMP_LO: | |
3375 | skdev->drive_jiffies = FIT_MXD_DATA(mfd); | |
3376 | data = (skdev->connect_time_stamp >> 16) & 0xFFFF; | |
3377 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); | |
3378 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3379 | skdev->last_mtd = mtd; | |
3380 | break; | |
3381 | ||
3382 | case FIT_MTD_CMD_LOG_TIME_STAMP_HI: | |
3383 | skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); | |
3384 | mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); | |
3385 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3386 | skdev->last_mtd = mtd; | |
3387 | ||
3388 | pr_err("(%s): Time sync driver=0x%x device=0x%x\n", | |
3389 | skd_name(skdev), | |
3390 | skdev->connect_time_stamp, skdev->drive_jiffies); | |
3391 | break; | |
3392 | ||
3393 | case FIT_MTD_ARM_QUEUE: | |
3394 | skdev->last_mtd = 0; | |
3395 | /* | |
3396 | * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. | |
3397 | */ | |
3398 | break; | |
3399 | ||
3400 | default: | |
3401 | break; | |
3402 | } | |
3403 | } | |
3404 | ||
3405 | static void skd_disable_interrupts(struct skd_device *skdev) | |
3406 | { | |
3407 | u32 sense; | |
3408 | ||
3409 | sense = SKD_READL(skdev, FIT_CONTROL); | |
3410 | sense &= ~FIT_CR_ENABLE_INTERRUPTS; | |
3411 | SKD_WRITEL(skdev, sense, FIT_CONTROL); | |
2e44b427 | 3412 | pr_debug("%s:%s:%d sense 0x%x\n", |
3413 | skdev->name, __func__, __LINE__, sense); | |
e67f86b3 AB |
3414 | |
3415 | /* Note that the 1s is written. A 1-bit means | |
3416 | * disable, a 0 means enable. | |
3417 | */ | |
3418 | SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); | |
3419 | } | |
3420 | ||
3421 | static void skd_enable_interrupts(struct skd_device *skdev) | |
3422 | { | |
3423 | u32 val; | |
3424 | ||
3425 | /* unmask interrupts first */ | |
3426 | val = FIT_ISH_FW_STATE_CHANGE + | |
3427 | FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; | |
3428 | ||
3429 | /* Note that the compliment of mask is written. A 1-bit means | |
3430 | * disable, a 0 means enable. */ | |
3431 | SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); | |
2e44b427 | 3432 | pr_debug("%s:%s:%d interrupt mask=0x%x\n", |
3433 | skdev->name, __func__, __LINE__, ~val); | |
e67f86b3 AB |
3434 | |
3435 | val = SKD_READL(skdev, FIT_CONTROL); | |
3436 | val |= FIT_CR_ENABLE_INTERRUPTS; | |
2e44b427 | 3437 | pr_debug("%s:%s:%d control=0x%x\n", |
3438 | skdev->name, __func__, __LINE__, val); | |
e67f86b3 AB |
3439 | SKD_WRITEL(skdev, val, FIT_CONTROL); |
3440 | } | |
3441 | ||
3442 | /* | |
3443 | ***************************************************************************** | |
3444 | * START, STOP, RESTART, QUIESCE, UNQUIESCE | |
3445 | ***************************************************************************** | |
3446 | */ | |
3447 | ||
3448 | static void skd_soft_reset(struct skd_device *skdev) | |
3449 | { | |
3450 | u32 val; | |
3451 | ||
3452 | val = SKD_READL(skdev, FIT_CONTROL); | |
3453 | val |= (FIT_CR_SOFT_RESET); | |
2e44b427 | 3454 | pr_debug("%s:%s:%d control=0x%x\n", |
3455 | skdev->name, __func__, __LINE__, val); | |
e67f86b3 AB |
3456 | SKD_WRITEL(skdev, val, FIT_CONTROL); |
3457 | } | |
3458 | ||
3459 | static void skd_start_device(struct skd_device *skdev) | |
3460 | { | |
3461 | unsigned long flags; | |
3462 | u32 sense; | |
3463 | u32 state; | |
3464 | ||
3465 | spin_lock_irqsave(&skdev->lock, flags); | |
3466 | ||
3467 | /* ack all ghost interrupts */ | |
3468 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
3469 | ||
3470 | sense = SKD_READL(skdev, FIT_STATUS); | |
3471 | ||
2e44b427 | 3472 | pr_debug("%s:%s:%d initial status=0x%x\n", |
3473 | skdev->name, __func__, __LINE__, sense); | |
e67f86b3 AB |
3474 | |
3475 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
3476 | skdev->drive_state = state; | |
3477 | skdev->last_mtd = 0; | |
3478 | ||
3479 | skdev->state = SKD_DRVR_STATE_STARTING; | |
3480 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
3481 | ||
3482 | skd_enable_interrupts(skdev); | |
3483 | ||
3484 | switch (skdev->drive_state) { | |
3485 | case FIT_SR_DRIVE_OFFLINE: | |
3486 | pr_err("(%s): Drive offline...\n", skd_name(skdev)); | |
3487 | break; | |
3488 | ||
3489 | case FIT_SR_DRIVE_FW_BOOTING: | |
2e44b427 | 3490 | pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", |
3491 | skdev->name, __func__, __LINE__, skdev->name); | |
e67f86b3 AB |
3492 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; |
3493 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
3494 | break; | |
3495 | ||
3496 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
3497 | pr_info("(%s): Start: BUSY_SANITIZE\n", | |
3498 | skd_name(skdev)); | |
3499 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | |
3500 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3501 | break; | |
3502 | ||
3503 | case FIT_SR_DRIVE_BUSY_ERASE: | |
3504 | pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); | |
3505 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | |
3506 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3507 | break; | |
3508 | ||
3509 | case FIT_SR_DRIVE_INIT: | |
3510 | case FIT_SR_DRIVE_ONLINE: | |
3511 | skd_soft_reset(skdev); | |
3512 | break; | |
3513 | ||
3514 | case FIT_SR_DRIVE_BUSY: | |
3515 | pr_err("(%s): Drive Busy...\n", skd_name(skdev)); | |
3516 | skdev->state = SKD_DRVR_STATE_BUSY; | |
3517 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3518 | break; | |
3519 | ||
3520 | case FIT_SR_DRIVE_SOFT_RESET: | |
3521 | pr_err("(%s) drive soft reset in prog\n", | |
3522 | skd_name(skdev)); | |
3523 | break; | |
3524 | ||
3525 | case FIT_SR_DRIVE_FAULT: | |
3526 | /* Fault state is bad...soft reset won't do it... | |
3527 | * Hard reset, maybe, but does it work on device? | |
3528 | * For now, just fault so the system doesn't hang. | |
3529 | */ | |
3530 | skd_drive_fault(skdev); | |
3531 | /*start the queue so we can respond with error to requests */ | |
2e44b427 | 3532 | pr_debug("%s:%s:%d starting %s queue\n", |
3533 | skdev->name, __func__, __LINE__, skdev->name); | |
6a5ec65b | 3534 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3535 | skdev->gendisk_on = -1; |
3536 | wake_up_interruptible(&skdev->waitq); | |
3537 | break; | |
3538 | ||
3539 | case 0xFF: | |
3540 | /* Most likely the device isn't there or isn't responding | |
3541 | * to the BAR1 addresses. */ | |
3542 | skd_drive_disappeared(skdev); | |
3543 | /*start the queue so we can respond with error to requests */ | |
2e44b427 | 3544 | pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", |
3545 | skdev->name, __func__, __LINE__, skdev->name); | |
6a5ec65b | 3546 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3547 | skdev->gendisk_on = -1; |
3548 | wake_up_interruptible(&skdev->waitq); | |
3549 | break; | |
3550 | ||
3551 | default: | |
3552 | pr_err("(%s) Start: unknown state %x\n", | |
3553 | skd_name(skdev), skdev->drive_state); | |
3554 | break; | |
3555 | } | |
3556 | ||
3557 | state = SKD_READL(skdev, FIT_CONTROL); | |
2e44b427 | 3558 | pr_debug("%s:%s:%d FIT Control Status=0x%x\n", |
3559 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3560 | |
3561 | state = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
2e44b427 | 3562 | pr_debug("%s:%s:%d Intr Status=0x%x\n", |
3563 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3564 | |
3565 | state = SKD_READL(skdev, FIT_INT_MASK_HOST); | |
2e44b427 | 3566 | pr_debug("%s:%s:%d Intr Mask=0x%x\n", |
3567 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3568 | |
3569 | state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
2e44b427 | 3570 | pr_debug("%s:%s:%d Msg from Dev=0x%x\n", |
3571 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3572 | |
3573 | state = SKD_READL(skdev, FIT_HW_VERSION); | |
2e44b427 | 3574 | pr_debug("%s:%s:%d HW version=0x%x\n", |
3575 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3576 | |
3577 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3578 | } | |
3579 | ||
3580 | static void skd_stop_device(struct skd_device *skdev) | |
3581 | { | |
3582 | unsigned long flags; | |
3583 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
3584 | u32 dev_state; | |
3585 | int i; | |
3586 | ||
3587 | spin_lock_irqsave(&skdev->lock, flags); | |
3588 | ||
3589 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
3590 | pr_err("(%s): skd_stop_device not online no sync\n", | |
3591 | skd_name(skdev)); | |
3592 | goto stop_out; | |
3593 | } | |
3594 | ||
3595 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) { | |
3596 | pr_err("(%s): skd_stop_device no special\n", | |
3597 | skd_name(skdev)); | |
3598 | goto stop_out; | |
3599 | } | |
3600 | ||
3601 | skdev->state = SKD_DRVR_STATE_SYNCING; | |
3602 | skdev->sync_done = 0; | |
3603 | ||
3604 | skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); | |
3605 | ||
3606 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3607 | ||
3608 | wait_event_interruptible_timeout(skdev->waitq, | |
3609 | (skdev->sync_done), (10 * HZ)); | |
3610 | ||
3611 | spin_lock_irqsave(&skdev->lock, flags); | |
3612 | ||
3613 | switch (skdev->sync_done) { | |
3614 | case 0: | |
3615 | pr_err("(%s): skd_stop_device no sync\n", | |
3616 | skd_name(skdev)); | |
3617 | break; | |
3618 | case 1: | |
3619 | pr_err("(%s): skd_stop_device sync done\n", | |
3620 | skd_name(skdev)); | |
3621 | break; | |
3622 | default: | |
3623 | pr_err("(%s): skd_stop_device sync error\n", | |
3624 | skd_name(skdev)); | |
3625 | } | |
3626 | ||
3627 | stop_out: | |
3628 | skdev->state = SKD_DRVR_STATE_STOPPING; | |
3629 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3630 | ||
3631 | skd_kill_timer(skdev); | |
3632 | ||
3633 | spin_lock_irqsave(&skdev->lock, flags); | |
3634 | skd_disable_interrupts(skdev); | |
3635 | ||
3636 | /* ensure all ints on device are cleared */ | |
3637 | /* soft reset the device to unload with a clean slate */ | |
3638 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
3639 | SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); | |
3640 | ||
3641 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3642 | ||
3643 | /* poll every 100ms, 1 second timeout */ | |
3644 | for (i = 0; i < 10; i++) { | |
3645 | dev_state = | |
3646 | SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; | |
3647 | if (dev_state == FIT_SR_DRIVE_INIT) | |
3648 | break; | |
3649 | set_current_state(TASK_INTERRUPTIBLE); | |
3650 | schedule_timeout(msecs_to_jiffies(100)); | |
3651 | } | |
3652 | ||
3653 | if (dev_state != FIT_SR_DRIVE_INIT) | |
3654 | pr_err("(%s): skd_stop_device state error 0x%02x\n", | |
3655 | skd_name(skdev), dev_state); | |
3656 | } | |
3657 | ||
3658 | /* assume spinlock is held */ | |
3659 | static void skd_restart_device(struct skd_device *skdev) | |
3660 | { | |
3661 | u32 state; | |
3662 | ||
3663 | /* ack all ghost interrupts */ | |
3664 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
3665 | ||
3666 | state = SKD_READL(skdev, FIT_STATUS); | |
3667 | ||
2e44b427 | 3668 | pr_debug("%s:%s:%d drive status=0x%x\n", |
3669 | skdev->name, __func__, __LINE__, state); | |
e67f86b3 AB |
3670 | |
3671 | state &= FIT_SR_DRIVE_STATE_MASK; | |
3672 | skdev->drive_state = state; | |
3673 | skdev->last_mtd = 0; | |
3674 | ||
3675 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
3676 | skdev->timer_countdown = SKD_RESTARTING_TIMO; | |
3677 | ||
3678 | skd_soft_reset(skdev); | |
3679 | } | |
3680 | ||
3681 | /* assume spinlock is held */ | |
3682 | static int skd_quiesce_dev(struct skd_device *skdev) | |
3683 | { | |
3684 | int rc = 0; | |
3685 | ||
3686 | switch (skdev->state) { | |
3687 | case SKD_DRVR_STATE_BUSY: | |
3688 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
2e44b427 | 3689 | pr_debug("%s:%s:%d stopping %s queue\n", |
3690 | skdev->name, __func__, __LINE__, skdev->name); | |
6a5ec65b | 3691 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
3692 | break; |
3693 | case SKD_DRVR_STATE_ONLINE: | |
3694 | case SKD_DRVR_STATE_STOPPING: | |
3695 | case SKD_DRVR_STATE_SYNCING: | |
3696 | case SKD_DRVR_STATE_PAUSING: | |
3697 | case SKD_DRVR_STATE_PAUSED: | |
3698 | case SKD_DRVR_STATE_STARTING: | |
3699 | case SKD_DRVR_STATE_RESTARTING: | |
3700 | case SKD_DRVR_STATE_RESUMING: | |
3701 | default: | |
3702 | rc = -EINVAL; | |
2e44b427 | 3703 | pr_debug("%s:%s:%d state [%d] not implemented\n", |
3704 | skdev->name, __func__, __LINE__, skdev->state); | |
e67f86b3 AB |
3705 | } |
3706 | return rc; | |
3707 | } | |
3708 | ||
3709 | /* assume spinlock is held */ | |
3710 | static int skd_unquiesce_dev(struct skd_device *skdev) | |
3711 | { | |
3712 | int prev_driver_state = skdev->state; | |
3713 | ||
3714 | skd_log_skdev(skdev, "unquiesce"); | |
3715 | if (skdev->state == SKD_DRVR_STATE_ONLINE) { | |
2e44b427 | 3716 | pr_debug("%s:%s:%d **** device already ONLINE\n", |
3717 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
3718 | return 0; |
3719 | } | |
3720 | if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { | |
3721 | /* | |
3722 | * If there has been an state change to other than | |
3723 | * ONLINE, we will rely on controller state change | |
3724 | * to come back online and restart the queue. | |
3725 | * The BUSY state means that driver is ready to | |
3726 | * continue normal processing but waiting for controller | |
3727 | * to become available. | |
3728 | */ | |
3729 | skdev->state = SKD_DRVR_STATE_BUSY; | |
2e44b427 | 3730 | pr_debug("%s:%s:%d drive BUSY state\n", |
3731 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
3732 | return 0; |
3733 | } | |
3734 | ||
3735 | /* | |
3736 | * Drive has just come online, driver is either in startup, | |
3737 | * paused performing a task, or bust waiting for hardware. | |
3738 | */ | |
3739 | switch (skdev->state) { | |
3740 | case SKD_DRVR_STATE_PAUSED: | |
3741 | case SKD_DRVR_STATE_BUSY: | |
3742 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
3743 | case SKD_DRVR_STATE_BUSY_ERASE: | |
3744 | case SKD_DRVR_STATE_STARTING: | |
3745 | case SKD_DRVR_STATE_RESTARTING: | |
3746 | case SKD_DRVR_STATE_FAULT: | |
3747 | case SKD_DRVR_STATE_IDLE: | |
3748 | case SKD_DRVR_STATE_LOAD: | |
3749 | skdev->state = SKD_DRVR_STATE_ONLINE; | |
3750 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | |
3751 | skd_name(skdev), | |
3752 | skd_skdev_state_to_str(prev_driver_state), | |
3753 | prev_driver_state, skd_skdev_state_to_str(skdev->state), | |
3754 | skdev->state); | |
2e44b427 | 3755 | pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", |
3756 | skdev->name, __func__, __LINE__); | |
3757 | pr_debug("%s:%s:%d starting %s queue\n", | |
3758 | skdev->name, __func__, __LINE__, skdev->name); | |
e67f86b3 | 3759 | pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); |
6a5ec65b | 3760 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
3761 | skdev->gendisk_on = 1; |
3762 | wake_up_interruptible(&skdev->waitq); | |
3763 | break; | |
3764 | ||
3765 | case SKD_DRVR_STATE_DISAPPEARED: | |
3766 | default: | |
2e44b427 | 3767 | pr_debug("%s:%s:%d **** driver state %d, not implemented \n", |
3768 | skdev->name, __func__, __LINE__, | |
3769 | skdev->state); | |
e67f86b3 AB |
3770 | return -EBUSY; |
3771 | } | |
3772 | return 0; | |
3773 | } | |
3774 | ||
3775 | /* | |
3776 | ***************************************************************************** | |
3777 | * PCIe MSI/MSI-X INTERRUPT HANDLERS | |
3778 | ***************************************************************************** | |
3779 | */ | |
3780 | ||
3781 | static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) | |
3782 | { | |
3783 | struct skd_device *skdev = skd_host_data; | |
3784 | unsigned long flags; | |
3785 | ||
3786 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 3787 | pr_debug("%s:%s:%d MSIX = 0x%x\n", |
3788 | skdev->name, __func__, __LINE__, | |
3789 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
3790 | pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), |
3791 | irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
3792 | SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); | |
3793 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3794 | return IRQ_HANDLED; | |
3795 | } | |
3796 | ||
3797 | static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) | |
3798 | { | |
3799 | struct skd_device *skdev = skd_host_data; | |
3800 | unsigned long flags; | |
3801 | ||
3802 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 3803 | pr_debug("%s:%s:%d MSIX = 0x%x\n", |
3804 | skdev->name, __func__, __LINE__, | |
3805 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
3806 | SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); |
3807 | skd_isr_fwstate(skdev); | |
3808 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3809 | return IRQ_HANDLED; | |
3810 | } | |
3811 | ||
3812 | static irqreturn_t skd_comp_q(int irq, void *skd_host_data) | |
3813 | { | |
3814 | struct skd_device *skdev = skd_host_data; | |
3815 | unsigned long flags; | |
3816 | int flush_enqueued = 0; | |
3817 | int deferred; | |
3818 | ||
3819 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 3820 | pr_debug("%s:%s:%d MSIX = 0x%x\n", |
3821 | skdev->name, __func__, __LINE__, | |
3822 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
3823 | SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); |
3824 | deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, | |
3825 | &flush_enqueued); | |
e67f86b3 AB |
3826 | if (flush_enqueued) |
3827 | skd_request_fn(skdev->queue); | |
3828 | ||
3829 | if (deferred) | |
3830 | schedule_work(&skdev->completion_worker); | |
3831 | else if (!flush_enqueued) | |
3832 | skd_request_fn(skdev->queue); | |
3833 | ||
3834 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3835 | ||
3836 | return IRQ_HANDLED; | |
3837 | } | |
3838 | ||
3839 | static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) | |
3840 | { | |
3841 | struct skd_device *skdev = skd_host_data; | |
3842 | unsigned long flags; | |
3843 | ||
3844 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 3845 | pr_debug("%s:%s:%d MSIX = 0x%x\n", |
3846 | skdev->name, __func__, __LINE__, | |
3847 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
3848 | SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); |
3849 | skd_isr_msg_from_dev(skdev); | |
3850 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3851 | return IRQ_HANDLED; | |
3852 | } | |
3853 | ||
3854 | static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) | |
3855 | { | |
3856 | struct skd_device *skdev = skd_host_data; | |
3857 | unsigned long flags; | |
3858 | ||
3859 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 3860 | pr_debug("%s:%s:%d MSIX = 0x%x\n", |
3861 | skdev->name, __func__, __LINE__, | |
3862 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
3863 | SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); |
3864 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3865 | return IRQ_HANDLED; | |
3866 | } | |
3867 | ||
3868 | /* | |
3869 | ***************************************************************************** | |
3870 | * PCIe MSI/MSI-X SETUP | |
3871 | ***************************************************************************** | |
3872 | */ | |
3873 | ||
3874 | struct skd_msix_entry { | |
3875 | int have_irq; | |
3876 | u32 vector; | |
3877 | u32 entry; | |
3878 | struct skd_device *rsp; | |
3879 | char isr_name[30]; | |
3880 | }; | |
3881 | ||
3882 | struct skd_init_msix_entry { | |
3883 | const char *name; | |
3884 | irq_handler_t handler; | |
3885 | }; | |
3886 | ||
3887 | #define SKD_MAX_MSIX_COUNT 13 | |
3888 | #define SKD_MIN_MSIX_COUNT 7 | |
3889 | #define SKD_BASE_MSIX_IRQ 4 | |
3890 | ||
3891 | static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { | |
3892 | { "(DMA 0)", skd_reserved_isr }, | |
3893 | { "(DMA 1)", skd_reserved_isr }, | |
3894 | { "(DMA 2)", skd_reserved_isr }, | |
3895 | { "(DMA 3)", skd_reserved_isr }, | |
3896 | { "(State Change)", skd_statec_isr }, | |
3897 | { "(COMPL_Q)", skd_comp_q }, | |
3898 | { "(MSG)", skd_msg_isr }, | |
3899 | { "(Reserved)", skd_reserved_isr }, | |
3900 | { "(Reserved)", skd_reserved_isr }, | |
3901 | { "(Queue Full 0)", skd_qfull_isr }, | |
3902 | { "(Queue Full 1)", skd_qfull_isr }, | |
3903 | { "(Queue Full 2)", skd_qfull_isr }, | |
3904 | { "(Queue Full 3)", skd_qfull_isr }, | |
3905 | }; | |
3906 | ||
3907 | static void skd_release_msix(struct skd_device *skdev) | |
3908 | { | |
3909 | struct skd_msix_entry *qentry; | |
3910 | int i; | |
3911 | ||
46817769 AG |
3912 | if (skdev->msix_entries) { |
3913 | for (i = 0; i < skdev->msix_count; i++) { | |
3914 | qentry = &skdev->msix_entries[i]; | |
3915 | skdev = qentry->rsp; | |
3916 | ||
3917 | if (qentry->have_irq) | |
3918 | devm_free_irq(&skdev->pdev->dev, | |
3919 | qentry->vector, qentry->rsp); | |
3920 | } | |
e67f86b3 | 3921 | |
46817769 | 3922 | kfree(skdev->msix_entries); |
e67f86b3 | 3923 | } |
46817769 AG |
3924 | |
3925 | if (skdev->msix_count) | |
3926 | pci_disable_msix(skdev->pdev); | |
3927 | ||
e67f86b3 AB |
3928 | skdev->msix_count = 0; |
3929 | skdev->msix_entries = NULL; | |
3930 | } | |
3931 | ||
3932 | static int skd_acquire_msix(struct skd_device *skdev) | |
3933 | { | |
a9df8625 | 3934 | int i, rc; |
46817769 AG |
3935 | struct pci_dev *pdev = skdev->pdev; |
3936 | struct msix_entry *entries; | |
e67f86b3 AB |
3937 | struct skd_msix_entry *qentry; |
3938 | ||
a9df8625 AG |
3939 | entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT, |
3940 | GFP_KERNEL); | |
e67f86b3 AB |
3941 | if (!entries) |
3942 | return -ENOMEM; | |
3943 | ||
a9df8625 | 3944 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) |
e67f86b3 AB |
3945 | entries[i].entry = i; |
3946 | ||
01aad3f0 AG |
3947 | rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT); |
3948 | if (rc) { | |
a9df8625 AG |
3949 | pr_err("(%s): failed to enable MSI-X %d\n", |
3950 | skd_name(skdev), rc); | |
e67f86b3 | 3951 | goto msix_out; |
e67f86b3 | 3952 | } |
46817769 | 3953 | |
01aad3f0 | 3954 | skdev->msix_count = SKD_MAX_MSIX_COUNT; |
e67f86b3 AB |
3955 | skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * |
3956 | skdev->msix_count, GFP_KERNEL); | |
3957 | if (!skdev->msix_entries) { | |
3958 | rc = -ENOMEM; | |
e67f86b3 AB |
3959 | pr_err("(%s): msix table allocation error\n", |
3960 | skd_name(skdev)); | |
3961 | goto msix_out; | |
3962 | } | |
3963 | ||
e67f86b3 | 3964 | for (i = 0; i < skdev->msix_count; i++) { |
1bc5ce5d | 3965 | qentry = &skdev->msix_entries[i]; |
e67f86b3 AB |
3966 | qentry->vector = entries[i].vector; |
3967 | qentry->entry = entries[i].entry; | |
3968 | qentry->rsp = NULL; | |
3969 | qentry->have_irq = 0; | |
2e44b427 | 3970 | pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n", |
3971 | skdev->name, __func__, __LINE__, | |
3972 | pci_name(pdev), skdev->name, | |
3973 | i, qentry->vector, qentry->entry); | |
e67f86b3 AB |
3974 | } |
3975 | ||
3976 | /* Enable MSI-X vectors for the base queue */ | |
c5e3035c | 3977 | for (i = 0; i < skdev->msix_count; i++) { |
e67f86b3 AB |
3978 | qentry = &skdev->msix_entries[i]; |
3979 | snprintf(qentry->isr_name, sizeof(qentry->isr_name), | |
3980 | "%s%d-msix %s", DRV_NAME, skdev->devno, | |
3981 | msix_entries[i].name); | |
3982 | rc = devm_request_irq(&skdev->pdev->dev, qentry->vector, | |
3983 | msix_entries[i].handler, 0, | |
3984 | qentry->isr_name, skdev); | |
3985 | if (rc) { | |
3986 | pr_err("(%s): Unable to register(%d) MSI-X " | |
3987 | "handler %d: %s\n", | |
3988 | skd_name(skdev), rc, i, qentry->isr_name); | |
3989 | goto msix_out; | |
3990 | } else { | |
3991 | qentry->have_irq = 1; | |
3992 | qentry->rsp = skdev; | |
3993 | } | |
3994 | } | |
2e44b427 | 3995 | pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", |
3996 | skdev->name, __func__, __LINE__, | |
3997 | pci_name(pdev), skdev->name, skdev->msix_count); | |
e67f86b3 AB |
3998 | return 0; |
3999 | ||
4000 | msix_out: | |
4001 | if (entries) | |
4002 | kfree(entries); | |
4003 | skd_release_msix(skdev); | |
4004 | return rc; | |
4005 | } | |
4006 | ||
4007 | static int skd_acquire_irq(struct skd_device *skdev) | |
4008 | { | |
4009 | int rc; | |
4010 | struct pci_dev *pdev; | |
4011 | ||
4012 | pdev = skdev->pdev; | |
4013 | skdev->msix_count = 0; | |
4014 | ||
4015 | RETRY_IRQ_TYPE: | |
4016 | switch (skdev->irq_type) { | |
4017 | case SKD_IRQ_MSIX: | |
4018 | rc = skd_acquire_msix(skdev); | |
4019 | if (!rc) | |
4020 | pr_info("(%s): MSI-X %d irqs enabled\n", | |
4021 | skd_name(skdev), skdev->msix_count); | |
4022 | else { | |
4023 | pr_err( | |
4024 | "(%s): failed to enable MSI-X, re-trying with MSI %d\n", | |
4025 | skd_name(skdev), rc); | |
4026 | skdev->irq_type = SKD_IRQ_MSI; | |
4027 | goto RETRY_IRQ_TYPE; | |
4028 | } | |
4029 | break; | |
4030 | case SKD_IRQ_MSI: | |
4031 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi", | |
4032 | DRV_NAME, skdev->devno); | |
a9df8625 AG |
4033 | rc = pci_enable_msi_range(pdev, 1, 1); |
4034 | if (rc > 0) { | |
e67f86b3 AB |
4035 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0, |
4036 | skdev->isr_name, skdev); | |
4037 | if (rc) { | |
4038 | pci_disable_msi(pdev); | |
4039 | pr_err( | |
4040 | "(%s): failed to allocate the MSI interrupt %d\n", | |
4041 | skd_name(skdev), rc); | |
4042 | goto RETRY_IRQ_LEGACY; | |
4043 | } | |
4044 | pr_info("(%s): MSI irq %d enabled\n", | |
4045 | skd_name(skdev), pdev->irq); | |
4046 | } else { | |
4047 | RETRY_IRQ_LEGACY: | |
4048 | pr_err( | |
4049 | "(%s): failed to enable MSI, re-trying with LEGACY %d\n", | |
4050 | skd_name(skdev), rc); | |
4051 | skdev->irq_type = SKD_IRQ_LEGACY; | |
4052 | goto RETRY_IRQ_TYPE; | |
4053 | } | |
4054 | break; | |
4055 | case SKD_IRQ_LEGACY: | |
4056 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), | |
4057 | "%s%d-legacy", DRV_NAME, skdev->devno); | |
4058 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, | |
4059 | IRQF_SHARED, skdev->isr_name, skdev); | |
4060 | if (!rc) | |
4061 | pr_info("(%s): LEGACY irq %d enabled\n", | |
4062 | skd_name(skdev), pdev->irq); | |
4063 | else | |
4064 | pr_err("(%s): request LEGACY irq error %d\n", | |
4065 | skd_name(skdev), rc); | |
4066 | break; | |
4067 | default: | |
4068 | pr_info("(%s): irq_type %d invalid, re-set to %d\n", | |
4069 | skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT); | |
4070 | skdev->irq_type = SKD_IRQ_LEGACY; | |
4071 | goto RETRY_IRQ_TYPE; | |
4072 | } | |
4073 | return rc; | |
4074 | } | |
4075 | ||
4076 | static void skd_release_irq(struct skd_device *skdev) | |
4077 | { | |
4078 | switch (skdev->irq_type) { | |
4079 | case SKD_IRQ_MSIX: | |
4080 | skd_release_msix(skdev); | |
4081 | break; | |
4082 | case SKD_IRQ_MSI: | |
4083 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | |
4084 | pci_disable_msi(skdev->pdev); | |
4085 | break; | |
4086 | case SKD_IRQ_LEGACY: | |
4087 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | |
4088 | break; | |
4089 | default: | |
4090 | pr_err("(%s): wrong irq type %d!", | |
4091 | skd_name(skdev), skdev->irq_type); | |
4092 | break; | |
4093 | } | |
4094 | } | |
4095 | ||
4096 | /* | |
4097 | ***************************************************************************** | |
4098 | * CONSTRUCT | |
4099 | ***************************************************************************** | |
4100 | */ | |
4101 | ||
e67f86b3 AB |
4102 | static int skd_cons_skcomp(struct skd_device *skdev) |
4103 | { | |
4104 | int rc = 0; | |
4105 | struct fit_completion_entry_v1 *skcomp; | |
4106 | u32 nbytes; | |
4107 | ||
4108 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | |
4109 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | |
4110 | ||
2e44b427 | 4111 | pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", |
4112 | skdev->name, __func__, __LINE__, | |
4113 | nbytes, SKD_N_COMPLETION_ENTRY); | |
e67f86b3 | 4114 | |
a5bbf616 JP |
4115 | skcomp = pci_zalloc_consistent(skdev->pdev, nbytes, |
4116 | &skdev->cq_dma_address); | |
e67f86b3 AB |
4117 | |
4118 | if (skcomp == NULL) { | |
4119 | rc = -ENOMEM; | |
4120 | goto err_out; | |
4121 | } | |
4122 | ||
e67f86b3 AB |
4123 | skdev->skcomp_table = skcomp; |
4124 | skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + | |
4125 | sizeof(*skcomp) * | |
4126 | SKD_N_COMPLETION_ENTRY); | |
4127 | ||
4128 | err_out: | |
4129 | return rc; | |
4130 | } | |
4131 | ||
4132 | static int skd_cons_skmsg(struct skd_device *skdev) | |
4133 | { | |
4134 | int rc = 0; | |
4135 | u32 i; | |
4136 | ||
2e44b427 | 4137 | pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", |
4138 | skdev->name, __func__, __LINE__, | |
4139 | sizeof(struct skd_fitmsg_context), | |
4140 | skdev->num_fitmsg_context, | |
4141 | sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); | |
e67f86b3 AB |
4142 | |
4143 | skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) | |
4144 | *skdev->num_fitmsg_context, GFP_KERNEL); | |
4145 | if (skdev->skmsg_table == NULL) { | |
4146 | rc = -ENOMEM; | |
4147 | goto err_out; | |
4148 | } | |
4149 | ||
4150 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
4151 | struct skd_fitmsg_context *skmsg; | |
4152 | ||
4153 | skmsg = &skdev->skmsg_table[i]; | |
4154 | ||
4155 | skmsg->id = i + SKD_ID_FIT_MSG; | |
4156 | ||
4157 | skmsg->state = SKD_MSG_STATE_IDLE; | |
4158 | skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, | |
4159 | SKD_N_FITMSG_BYTES + 64, | |
4160 | &skmsg->mb_dma_address); | |
4161 | ||
4162 | if (skmsg->msg_buf == NULL) { | |
4163 | rc = -ENOMEM; | |
4164 | goto err_out; | |
4165 | } | |
4166 | ||
4167 | skmsg->offset = (u32)((u64)skmsg->msg_buf & | |
4168 | (~FIT_QCMD_BASE_ADDRESS_MASK)); | |
4169 | skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; | |
4170 | skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & | |
4171 | FIT_QCMD_BASE_ADDRESS_MASK); | |
4172 | skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; | |
4173 | skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; | |
4174 | memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); | |
4175 | ||
4176 | skmsg->next = &skmsg[1]; | |
4177 | } | |
4178 | ||
4179 | /* Free list is in order starting with the 0th entry. */ | |
4180 | skdev->skmsg_table[i - 1].next = NULL; | |
4181 | skdev->skmsg_free_list = skdev->skmsg_table; | |
4182 | ||
4183 | err_out: | |
4184 | return rc; | |
4185 | } | |
4186 | ||
542d7b00 BZ |
4187 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, |
4188 | u32 n_sg, | |
4189 | dma_addr_t *ret_dma_addr) | |
4190 | { | |
4191 | struct fit_sg_descriptor *sg_list; | |
4192 | u32 nbytes; | |
4193 | ||
4194 | nbytes = sizeof(*sg_list) * n_sg; | |
4195 | ||
4196 | sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); | |
4197 | ||
4198 | if (sg_list != NULL) { | |
4199 | uint64_t dma_address = *ret_dma_addr; | |
4200 | u32 i; | |
4201 | ||
4202 | memset(sg_list, 0, nbytes); | |
4203 | ||
4204 | for (i = 0; i < n_sg - 1; i++) { | |
4205 | uint64_t ndp_off; | |
4206 | ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); | |
4207 | ||
4208 | sg_list[i].next_desc_ptr = dma_address + ndp_off; | |
4209 | } | |
4210 | sg_list[i].next_desc_ptr = 0LL; | |
4211 | } | |
4212 | ||
4213 | return sg_list; | |
4214 | } | |
4215 | ||
e67f86b3 AB |
4216 | static int skd_cons_skreq(struct skd_device *skdev) |
4217 | { | |
4218 | int rc = 0; | |
4219 | u32 i; | |
4220 | ||
2e44b427 | 4221 | pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", |
4222 | skdev->name, __func__, __LINE__, | |
4223 | sizeof(struct skd_request_context), | |
4224 | skdev->num_req_context, | |
4225 | sizeof(struct skd_request_context) * skdev->num_req_context); | |
e67f86b3 AB |
4226 | |
4227 | skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) | |
4228 | * skdev->num_req_context, GFP_KERNEL); | |
4229 | if (skdev->skreq_table == NULL) { | |
4230 | rc = -ENOMEM; | |
4231 | goto err_out; | |
4232 | } | |
4233 | ||
2e44b427 | 4234 | pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", |
4235 | skdev->name, __func__, __LINE__, | |
4236 | skdev->sgs_per_request, sizeof(struct scatterlist), | |
4237 | skdev->sgs_per_request * sizeof(struct scatterlist)); | |
e67f86b3 AB |
4238 | |
4239 | for (i = 0; i < skdev->num_req_context; i++) { | |
4240 | struct skd_request_context *skreq; | |
4241 | ||
4242 | skreq = &skdev->skreq_table[i]; | |
4243 | ||
4244 | skreq->id = i + SKD_ID_RW_REQUEST; | |
4245 | skreq->state = SKD_REQ_STATE_IDLE; | |
4246 | ||
4247 | skreq->sg = kzalloc(sizeof(struct scatterlist) * | |
4248 | skdev->sgs_per_request, GFP_KERNEL); | |
4249 | if (skreq->sg == NULL) { | |
4250 | rc = -ENOMEM; | |
4251 | goto err_out; | |
4252 | } | |
4253 | sg_init_table(skreq->sg, skdev->sgs_per_request); | |
4254 | ||
4255 | skreq->sksg_list = skd_cons_sg_list(skdev, | |
4256 | skdev->sgs_per_request, | |
4257 | &skreq->sksg_dma_address); | |
4258 | ||
4259 | if (skreq->sksg_list == NULL) { | |
4260 | rc = -ENOMEM; | |
4261 | goto err_out; | |
4262 | } | |
4263 | ||
4264 | skreq->next = &skreq[1]; | |
4265 | } | |
4266 | ||
4267 | /* Free list is in order starting with the 0th entry. */ | |
4268 | skdev->skreq_table[i - 1].next = NULL; | |
4269 | skdev->skreq_free_list = skdev->skreq_table; | |
4270 | ||
4271 | err_out: | |
4272 | return rc; | |
4273 | } | |
4274 | ||
4275 | static int skd_cons_skspcl(struct skd_device *skdev) | |
4276 | { | |
4277 | int rc = 0; | |
4278 | u32 i, nbytes; | |
4279 | ||
2e44b427 | 4280 | pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", |
4281 | skdev->name, __func__, __LINE__, | |
4282 | sizeof(struct skd_special_context), | |
4283 | skdev->n_special, | |
4284 | sizeof(struct skd_special_context) * skdev->n_special); | |
e67f86b3 AB |
4285 | |
4286 | skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) | |
4287 | * skdev->n_special, GFP_KERNEL); | |
4288 | if (skdev->skspcl_table == NULL) { | |
4289 | rc = -ENOMEM; | |
4290 | goto err_out; | |
4291 | } | |
4292 | ||
4293 | for (i = 0; i < skdev->n_special; i++) { | |
4294 | struct skd_special_context *skspcl; | |
4295 | ||
4296 | skspcl = &skdev->skspcl_table[i]; | |
4297 | ||
4298 | skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; | |
4299 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
4300 | ||
4301 | skspcl->req.next = &skspcl[1].req; | |
4302 | ||
4303 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
4304 | ||
a5bbf616 JP |
4305 | skspcl->msg_buf = |
4306 | pci_zalloc_consistent(skdev->pdev, nbytes, | |
4307 | &skspcl->mb_dma_address); | |
e67f86b3 AB |
4308 | if (skspcl->msg_buf == NULL) { |
4309 | rc = -ENOMEM; | |
4310 | goto err_out; | |
4311 | } | |
4312 | ||
e67f86b3 AB |
4313 | skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * |
4314 | SKD_N_SG_PER_SPECIAL, GFP_KERNEL); | |
4315 | if (skspcl->req.sg == NULL) { | |
4316 | rc = -ENOMEM; | |
4317 | goto err_out; | |
4318 | } | |
4319 | ||
4320 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, | |
4321 | SKD_N_SG_PER_SPECIAL, | |
4322 | &skspcl->req. | |
4323 | sksg_dma_address); | |
4324 | if (skspcl->req.sksg_list == NULL) { | |
4325 | rc = -ENOMEM; | |
4326 | goto err_out; | |
4327 | } | |
4328 | } | |
4329 | ||
4330 | /* Free list is in order starting with the 0th entry. */ | |
4331 | skdev->skspcl_table[i - 1].req.next = NULL; | |
4332 | skdev->skspcl_free_list = skdev->skspcl_table; | |
4333 | ||
4334 | return rc; | |
4335 | ||
4336 | err_out: | |
4337 | return rc; | |
4338 | } | |
4339 | ||
4340 | static int skd_cons_sksb(struct skd_device *skdev) | |
4341 | { | |
4342 | int rc = 0; | |
4343 | struct skd_special_context *skspcl; | |
4344 | u32 nbytes; | |
4345 | ||
4346 | skspcl = &skdev->internal_skspcl; | |
4347 | ||
4348 | skspcl->req.id = 0 + SKD_ID_INTERNAL; | |
4349 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
4350 | ||
4351 | nbytes = SKD_N_INTERNAL_BYTES; | |
4352 | ||
a5bbf616 JP |
4353 | skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, |
4354 | &skspcl->db_dma_address); | |
e67f86b3 AB |
4355 | if (skspcl->data_buf == NULL) { |
4356 | rc = -ENOMEM; | |
4357 | goto err_out; | |
4358 | } | |
4359 | ||
e67f86b3 | 4360 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; |
a5bbf616 JP |
4361 | skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, |
4362 | &skspcl->mb_dma_address); | |
e67f86b3 AB |
4363 | if (skspcl->msg_buf == NULL) { |
4364 | rc = -ENOMEM; | |
4365 | goto err_out; | |
4366 | } | |
4367 | ||
e67f86b3 AB |
4368 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, |
4369 | &skspcl->req.sksg_dma_address); | |
4370 | if (skspcl->req.sksg_list == NULL) { | |
4371 | rc = -ENOMEM; | |
4372 | goto err_out; | |
4373 | } | |
4374 | ||
4375 | if (!skd_format_internal_skspcl(skdev)) { | |
4376 | rc = -EINVAL; | |
4377 | goto err_out; | |
4378 | } | |
4379 | ||
4380 | err_out: | |
4381 | return rc; | |
4382 | } | |
4383 | ||
e67f86b3 AB |
4384 | static int skd_cons_disk(struct skd_device *skdev) |
4385 | { | |
4386 | int rc = 0; | |
4387 | struct gendisk *disk; | |
4388 | struct request_queue *q; | |
4389 | unsigned long flags; | |
4390 | ||
4391 | disk = alloc_disk(SKD_MINORS_PER_DEVICE); | |
4392 | if (!disk) { | |
4393 | rc = -ENOMEM; | |
4394 | goto err_out; | |
4395 | } | |
4396 | ||
4397 | skdev->disk = disk; | |
4398 | sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); | |
4399 | ||
4400 | disk->major = skdev->major; | |
4401 | disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; | |
4402 | disk->fops = &skd_blockdev_ops; | |
4403 | disk->private_data = skdev; | |
4404 | ||
fcd37eb3 | 4405 | q = blk_init_queue(skd_request_fn, &skdev->lock); |
e67f86b3 AB |
4406 | if (!q) { |
4407 | rc = -ENOMEM; | |
4408 | goto err_out; | |
4409 | } | |
4410 | ||
4411 | skdev->queue = q; | |
4412 | disk->queue = q; | |
4413 | q->queuedata = skdev; | |
4414 | ||
e67f86b3 AB |
4415 | blk_queue_flush(q, REQ_FLUSH | REQ_FUA); |
4416 | blk_queue_max_segments(q, skdev->sgs_per_request); | |
4417 | blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); | |
4418 | ||
4419 | /* set sysfs ptimal_io_size to 8K */ | |
4420 | blk_queue_io_opt(q, 8192); | |
4421 | ||
4422 | /* DISCARD Flag initialization. */ | |
4423 | q->limits.discard_granularity = 8192; | |
4424 | q->limits.discard_alignment = 0; | |
4425 | q->limits.max_discard_sectors = UINT_MAX >> 9; | |
4426 | q->limits.discard_zeroes_data = 1; | |
4427 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | |
4428 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | |
4429 | ||
4430 | spin_lock_irqsave(&skdev->lock, flags); | |
2e44b427 | 4431 | pr_debug("%s:%s:%d stopping %s queue\n", |
4432 | skdev->name, __func__, __LINE__, skdev->name); | |
6a5ec65b | 4433 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
4434 | spin_unlock_irqrestore(&skdev->lock, flags); |
4435 | ||
4436 | err_out: | |
4437 | return rc; | |
4438 | } | |
4439 | ||
542d7b00 BZ |
4440 | #define SKD_N_DEV_TABLE 16u |
4441 | static u32 skd_next_devno; | |
e67f86b3 | 4442 | |
542d7b00 | 4443 | static struct skd_device *skd_construct(struct pci_dev *pdev) |
e67f86b3 | 4444 | { |
542d7b00 BZ |
4445 | struct skd_device *skdev; |
4446 | int blk_major = skd_major; | |
4447 | int rc; | |
e67f86b3 | 4448 | |
542d7b00 | 4449 | skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); |
e67f86b3 | 4450 | |
542d7b00 BZ |
4451 | if (!skdev) { |
4452 | pr_err(PFX "(%s): memory alloc failure\n", | |
4453 | pci_name(pdev)); | |
4454 | return NULL; | |
4455 | } | |
e67f86b3 | 4456 | |
542d7b00 BZ |
4457 | skdev->state = SKD_DRVR_STATE_LOAD; |
4458 | skdev->pdev = pdev; | |
4459 | skdev->devno = skd_next_devno++; | |
4460 | skdev->major = blk_major; | |
4461 | skdev->irq_type = skd_isr_type; | |
4462 | sprintf(skdev->name, DRV_NAME "%d", skdev->devno); | |
4463 | skdev->dev_max_queue_depth = 0; | |
e67f86b3 | 4464 | |
542d7b00 BZ |
4465 | skdev->num_req_context = skd_max_queue_depth; |
4466 | skdev->num_fitmsg_context = skd_max_queue_depth; | |
4467 | skdev->n_special = skd_max_pass_thru; | |
4468 | skdev->cur_max_queue_depth = 1; | |
4469 | skdev->queue_low_water_mark = 1; | |
4470 | skdev->proto_ver = 99; | |
4471 | skdev->sgs_per_request = skd_sgs_per_request; | |
4472 | skdev->dbg_level = skd_dbg_level; | |
e67f86b3 | 4473 | |
542d7b00 | 4474 | atomic_set(&skdev->device_count, 0); |
e67f86b3 | 4475 | |
542d7b00 BZ |
4476 | spin_lock_init(&skdev->lock); |
4477 | ||
4478 | INIT_WORK(&skdev->completion_worker, skd_completion_worker); | |
e67f86b3 | 4479 | |
2e44b427 | 4480 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); |
542d7b00 BZ |
4481 | rc = skd_cons_skcomp(skdev); |
4482 | if (rc < 0) | |
4483 | goto err_out; | |
e67f86b3 | 4484 | |
542d7b00 BZ |
4485 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); |
4486 | rc = skd_cons_skmsg(skdev); | |
4487 | if (rc < 0) | |
4488 | goto err_out; | |
4489 | ||
4490 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | |
4491 | rc = skd_cons_skreq(skdev); | |
4492 | if (rc < 0) | |
4493 | goto err_out; | |
4494 | ||
4495 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | |
4496 | rc = skd_cons_skspcl(skdev); | |
4497 | if (rc < 0) | |
4498 | goto err_out; | |
4499 | ||
4500 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | |
4501 | rc = skd_cons_sksb(skdev); | |
4502 | if (rc < 0) | |
4503 | goto err_out; | |
4504 | ||
4505 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | |
4506 | rc = skd_cons_disk(skdev); | |
4507 | if (rc < 0) | |
4508 | goto err_out; | |
4509 | ||
4510 | pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); | |
4511 | return skdev; | |
4512 | ||
4513 | err_out: | |
4514 | pr_debug("%s:%s:%d construct failed\n", | |
4515 | skdev->name, __func__, __LINE__); | |
4516 | skd_destruct(skdev); | |
4517 | return NULL; | |
e67f86b3 AB |
4518 | } |
4519 | ||
542d7b00 BZ |
4520 | /* |
4521 | ***************************************************************************** | |
4522 | * DESTRUCT (FREE) | |
4523 | ***************************************************************************** | |
4524 | */ | |
4525 | ||
e67f86b3 AB |
4526 | static void skd_free_skcomp(struct skd_device *skdev) |
4527 | { | |
4528 | if (skdev->skcomp_table != NULL) { | |
4529 | u32 nbytes; | |
4530 | ||
4531 | nbytes = sizeof(skdev->skcomp_table[0]) * | |
4532 | SKD_N_COMPLETION_ENTRY; | |
4533 | pci_free_consistent(skdev->pdev, nbytes, | |
4534 | skdev->skcomp_table, skdev->cq_dma_address); | |
4535 | } | |
4536 | ||
4537 | skdev->skcomp_table = NULL; | |
4538 | skdev->cq_dma_address = 0; | |
4539 | } | |
4540 | ||
4541 | static void skd_free_skmsg(struct skd_device *skdev) | |
4542 | { | |
4543 | u32 i; | |
4544 | ||
4545 | if (skdev->skmsg_table == NULL) | |
4546 | return; | |
4547 | ||
4548 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
4549 | struct skd_fitmsg_context *skmsg; | |
4550 | ||
4551 | skmsg = &skdev->skmsg_table[i]; | |
4552 | ||
4553 | if (skmsg->msg_buf != NULL) { | |
4554 | skmsg->msg_buf += skmsg->offset; | |
4555 | skmsg->mb_dma_address += skmsg->offset; | |
4556 | pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, | |
4557 | skmsg->msg_buf, | |
4558 | skmsg->mb_dma_address); | |
4559 | } | |
4560 | skmsg->msg_buf = NULL; | |
4561 | skmsg->mb_dma_address = 0; | |
4562 | } | |
4563 | ||
4564 | kfree(skdev->skmsg_table); | |
4565 | skdev->skmsg_table = NULL; | |
4566 | } | |
4567 | ||
542d7b00 BZ |
4568 | static void skd_free_sg_list(struct skd_device *skdev, |
4569 | struct fit_sg_descriptor *sg_list, | |
4570 | u32 n_sg, dma_addr_t dma_addr) | |
4571 | { | |
4572 | if (sg_list != NULL) { | |
4573 | u32 nbytes; | |
4574 | ||
4575 | nbytes = sizeof(*sg_list) * n_sg; | |
4576 | ||
4577 | pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); | |
4578 | } | |
4579 | } | |
4580 | ||
e67f86b3 AB |
4581 | static void skd_free_skreq(struct skd_device *skdev) |
4582 | { | |
4583 | u32 i; | |
4584 | ||
4585 | if (skdev->skreq_table == NULL) | |
4586 | return; | |
4587 | ||
4588 | for (i = 0; i < skdev->num_req_context; i++) { | |
4589 | struct skd_request_context *skreq; | |
4590 | ||
4591 | skreq = &skdev->skreq_table[i]; | |
4592 | ||
4593 | skd_free_sg_list(skdev, skreq->sksg_list, | |
4594 | skdev->sgs_per_request, | |
4595 | skreq->sksg_dma_address); | |
4596 | ||
4597 | skreq->sksg_list = NULL; | |
4598 | skreq->sksg_dma_address = 0; | |
4599 | ||
4600 | kfree(skreq->sg); | |
4601 | } | |
4602 | ||
4603 | kfree(skdev->skreq_table); | |
4604 | skdev->skreq_table = NULL; | |
4605 | } | |
4606 | ||
4607 | static void skd_free_skspcl(struct skd_device *skdev) | |
4608 | { | |
4609 | u32 i; | |
4610 | u32 nbytes; | |
4611 | ||
4612 | if (skdev->skspcl_table == NULL) | |
4613 | return; | |
4614 | ||
4615 | for (i = 0; i < skdev->n_special; i++) { | |
4616 | struct skd_special_context *skspcl; | |
4617 | ||
4618 | skspcl = &skdev->skspcl_table[i]; | |
4619 | ||
4620 | if (skspcl->msg_buf != NULL) { | |
4621 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
4622 | pci_free_consistent(skdev->pdev, nbytes, | |
4623 | skspcl->msg_buf, | |
4624 | skspcl->mb_dma_address); | |
4625 | } | |
4626 | ||
4627 | skspcl->msg_buf = NULL; | |
4628 | skspcl->mb_dma_address = 0; | |
4629 | ||
4630 | skd_free_sg_list(skdev, skspcl->req.sksg_list, | |
4631 | SKD_N_SG_PER_SPECIAL, | |
4632 | skspcl->req.sksg_dma_address); | |
4633 | ||
4634 | skspcl->req.sksg_list = NULL; | |
4635 | skspcl->req.sksg_dma_address = 0; | |
4636 | ||
4637 | kfree(skspcl->req.sg); | |
4638 | } | |
4639 | ||
4640 | kfree(skdev->skspcl_table); | |
4641 | skdev->skspcl_table = NULL; | |
4642 | } | |
4643 | ||
4644 | static void skd_free_sksb(struct skd_device *skdev) | |
4645 | { | |
4646 | struct skd_special_context *skspcl; | |
4647 | u32 nbytes; | |
4648 | ||
4649 | skspcl = &skdev->internal_skspcl; | |
4650 | ||
4651 | if (skspcl->data_buf != NULL) { | |
4652 | nbytes = SKD_N_INTERNAL_BYTES; | |
4653 | ||
4654 | pci_free_consistent(skdev->pdev, nbytes, | |
4655 | skspcl->data_buf, skspcl->db_dma_address); | |
4656 | } | |
4657 | ||
4658 | skspcl->data_buf = NULL; | |
4659 | skspcl->db_dma_address = 0; | |
4660 | ||
4661 | if (skspcl->msg_buf != NULL) { | |
4662 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
4663 | pci_free_consistent(skdev->pdev, nbytes, | |
4664 | skspcl->msg_buf, skspcl->mb_dma_address); | |
4665 | } | |
4666 | ||
4667 | skspcl->msg_buf = NULL; | |
4668 | skspcl->mb_dma_address = 0; | |
4669 | ||
4670 | skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, | |
4671 | skspcl->req.sksg_dma_address); | |
4672 | ||
4673 | skspcl->req.sksg_list = NULL; | |
4674 | skspcl->req.sksg_dma_address = 0; | |
4675 | } | |
4676 | ||
e67f86b3 AB |
4677 | static void skd_free_disk(struct skd_device *skdev) |
4678 | { | |
4679 | struct gendisk *disk = skdev->disk; | |
4680 | ||
4681 | if (disk != NULL) { | |
4682 | struct request_queue *q = disk->queue; | |
4683 | ||
4684 | if (disk->flags & GENHD_FL_UP) | |
4685 | del_gendisk(disk); | |
4686 | if (q) | |
4687 | blk_cleanup_queue(q); | |
4688 | put_disk(disk); | |
4689 | } | |
4690 | skdev->disk = NULL; | |
4691 | } | |
4692 | ||
542d7b00 BZ |
4693 | static void skd_destruct(struct skd_device *skdev) |
4694 | { | |
4695 | if (skdev == NULL) | |
4696 | return; | |
4697 | ||
4698 | ||
4699 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | |
4700 | skd_free_disk(skdev); | |
4701 | ||
4702 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | |
4703 | skd_free_sksb(skdev); | |
4704 | ||
4705 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | |
4706 | skd_free_skspcl(skdev); | |
4707 | ||
4708 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | |
4709 | skd_free_skreq(skdev); | |
4710 | ||
4711 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); | |
4712 | skd_free_skmsg(skdev); | |
e67f86b3 | 4713 | |
542d7b00 BZ |
4714 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); |
4715 | skd_free_skcomp(skdev); | |
4716 | ||
4717 | pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); | |
4718 | kfree(skdev); | |
4719 | } | |
e67f86b3 AB |
4720 | |
4721 | /* | |
4722 | ***************************************************************************** | |
4723 | * BLOCK DEVICE (BDEV) GLUE | |
4724 | ***************************************************************************** | |
4725 | */ | |
4726 | ||
4727 | static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
4728 | { | |
4729 | struct skd_device *skdev; | |
4730 | u64 capacity; | |
4731 | ||
4732 | skdev = bdev->bd_disk->private_data; | |
4733 | ||
2e44b427 | 4734 | pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", |
4735 | skdev->name, __func__, __LINE__, | |
4736 | bdev->bd_disk->disk_name, current->comm); | |
e67f86b3 AB |
4737 | |
4738 | if (skdev->read_cap_is_valid) { | |
4739 | capacity = get_capacity(skdev->disk); | |
4740 | geo->heads = 64; | |
4741 | geo->sectors = 255; | |
4742 | geo->cylinders = (capacity) / (255 * 64); | |
4743 | ||
4744 | return 0; | |
4745 | } | |
4746 | return -EIO; | |
4747 | } | |
4748 | ||
4749 | static int skd_bdev_attach(struct skd_device *skdev) | |
4750 | { | |
2e44b427 | 4751 | pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); |
e67f86b3 AB |
4752 | add_disk(skdev->disk); |
4753 | return 0; | |
4754 | } | |
4755 | ||
4756 | static const struct block_device_operations skd_blockdev_ops = { | |
4757 | .owner = THIS_MODULE, | |
4758 | .ioctl = skd_bdev_ioctl, | |
4759 | .getgeo = skd_bdev_getgeo, | |
4760 | }; | |
4761 | ||
4762 | ||
4763 | /* | |
4764 | ***************************************************************************** | |
4765 | * PCIe DRIVER GLUE | |
4766 | ***************************************************************************** | |
4767 | */ | |
4768 | ||
9baa3c34 | 4769 | static const struct pci_device_id skd_pci_tbl[] = { |
e67f86b3 AB |
4770 | { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, |
4771 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | |
4772 | { 0 } /* terminate list */ | |
4773 | }; | |
4774 | ||
4775 | MODULE_DEVICE_TABLE(pci, skd_pci_tbl); | |
4776 | ||
4777 | static char *skd_pci_info(struct skd_device *skdev, char *str) | |
4778 | { | |
4779 | int pcie_reg; | |
4780 | ||
4781 | strcpy(str, "PCIe ("); | |
4782 | pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); | |
4783 | ||
4784 | if (pcie_reg) { | |
4785 | ||
4786 | char lwstr[6]; | |
4787 | uint16_t pcie_lstat, lspeed, lwidth; | |
4788 | ||
4789 | pcie_reg += 0x12; | |
4790 | pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); | |
4791 | lspeed = pcie_lstat & (0xF); | |
4792 | lwidth = (pcie_lstat & 0x3F0) >> 4; | |
4793 | ||
4794 | if (lspeed == 1) | |
4795 | strcat(str, "2.5GT/s "); | |
4796 | else if (lspeed == 2) | |
4797 | strcat(str, "5.0GT/s "); | |
4798 | else | |
4799 | strcat(str, "<unknown> "); | |
4800 | snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); | |
4801 | strcat(str, lwstr); | |
4802 | } | |
4803 | return str; | |
4804 | } | |
4805 | ||
4806 | static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
4807 | { | |
4808 | int i; | |
4809 | int rc = 0; | |
4810 | char pci_str[32]; | |
4811 | struct skd_device *skdev; | |
4812 | ||
4813 | pr_info("STEC s1120 Driver(%s) version %s-b%s\n", | |
4814 | DRV_NAME, DRV_VERSION, DRV_BUILD_ID); | |
4815 | pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", | |
4816 | pci_name(pdev), pdev->vendor, pdev->device); | |
4817 | ||
4818 | rc = pci_enable_device(pdev); | |
4819 | if (rc) | |
4820 | return rc; | |
4821 | rc = pci_request_regions(pdev, DRV_NAME); | |
4822 | if (rc) | |
4823 | goto err_out; | |
4824 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
4825 | if (!rc) { | |
4826 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | |
4827 | ||
4828 | pr_err("(%s): consistent DMA mask error %d\n", | |
4829 | pci_name(pdev), rc); | |
4830 | } | |
4831 | } else { | |
4832 | (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))); | |
4833 | if (rc) { | |
4834 | ||
4835 | pr_err("(%s): DMA mask error %d\n", | |
4836 | pci_name(pdev), rc); | |
4837 | goto err_out_regions; | |
4838 | } | |
4839 | } | |
4840 | ||
b8df6647 BZ |
4841 | if (!skd_major) { |
4842 | rc = register_blkdev(0, DRV_NAME); | |
4843 | if (rc < 0) | |
4844 | goto err_out_regions; | |
4845 | BUG_ON(!rc); | |
4846 | skd_major = rc; | |
4847 | } | |
4848 | ||
e67f86b3 | 4849 | skdev = skd_construct(pdev); |
1762b57f WY |
4850 | if (skdev == NULL) { |
4851 | rc = -ENOMEM; | |
e67f86b3 | 4852 | goto err_out_regions; |
1762b57f | 4853 | } |
e67f86b3 AB |
4854 | |
4855 | skd_pci_info(skdev, pci_str); | |
4856 | pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); | |
4857 | ||
4858 | pci_set_master(pdev); | |
4859 | rc = pci_enable_pcie_error_reporting(pdev); | |
4860 | if (rc) { | |
4861 | pr_err( | |
4862 | "(%s): bad enable of PCIe error reporting rc=%d\n", | |
4863 | skd_name(skdev), rc); | |
4864 | skdev->pcie_error_reporting_is_enabled = 0; | |
4865 | } else | |
4866 | skdev->pcie_error_reporting_is_enabled = 1; | |
4867 | ||
4868 | ||
4869 | pci_set_drvdata(pdev, skdev); | |
ebedd16d | 4870 | |
e67f86b3 AB |
4871 | skdev->disk->driverfs_dev = &pdev->dev; |
4872 | ||
4873 | for (i = 0; i < SKD_MAX_BARS; i++) { | |
4874 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | |
4875 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | |
4876 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | |
4877 | skdev->mem_size[i]); | |
4878 | if (!skdev->mem_map[i]) { | |
4879 | pr_err("(%s): Unable to map adapter memory!\n", | |
4880 | skd_name(skdev)); | |
4881 | rc = -ENODEV; | |
4882 | goto err_out_iounmap; | |
4883 | } | |
2e44b427 | 4884 | pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", |
4885 | skdev->name, __func__, __LINE__, | |
4886 | skdev->mem_map[i], | |
4887 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | |
e67f86b3 AB |
4888 | } |
4889 | ||
4890 | rc = skd_acquire_irq(skdev); | |
4891 | if (rc) { | |
4892 | pr_err("(%s): interrupt resource error %d\n", | |
4893 | skd_name(skdev), rc); | |
4894 | goto err_out_iounmap; | |
4895 | } | |
4896 | ||
4897 | rc = skd_start_timer(skdev); | |
4898 | if (rc) | |
4899 | goto err_out_timer; | |
4900 | ||
4901 | init_waitqueue_head(&skdev->waitq); | |
4902 | ||
4903 | skd_start_device(skdev); | |
4904 | ||
4905 | rc = wait_event_interruptible_timeout(skdev->waitq, | |
4906 | (skdev->gendisk_on), | |
4907 | (SKD_START_WAIT_SECONDS * HZ)); | |
4908 | if (skdev->gendisk_on > 0) { | |
4909 | /* device came on-line after reset */ | |
4910 | skd_bdev_attach(skdev); | |
4911 | rc = 0; | |
4912 | } else { | |
4913 | /* we timed out, something is wrong with the device, | |
4914 | don't add the disk structure */ | |
4915 | pr_err( | |
4916 | "(%s): error: waiting for s1120 timed out %d!\n", | |
4917 | skd_name(skdev), rc); | |
4918 | /* in case of no error; we timeout with ENXIO */ | |
4919 | if (!rc) | |
4920 | rc = -ENXIO; | |
4921 | goto err_out_timer; | |
4922 | } | |
4923 | ||
4924 | ||
4925 | #ifdef SKD_VMK_POLL_HANDLER | |
4926 | if (skdev->irq_type == SKD_IRQ_MSIX) { | |
4927 | /* MSIX completion handler is being used for coredump */ | |
4928 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | |
4929 | skdev->msix_entries[5].vector, | |
4930 | skd_comp_q, skdev); | |
4931 | } else { | |
4932 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | |
4933 | skdev->pdev->irq, skd_isr, | |
4934 | skdev); | |
4935 | } | |
4936 | #endif /* SKD_VMK_POLL_HANDLER */ | |
4937 | ||
4938 | return rc; | |
4939 | ||
4940 | err_out_timer: | |
4941 | skd_stop_device(skdev); | |
4942 | skd_release_irq(skdev); | |
4943 | ||
4944 | err_out_iounmap: | |
4945 | for (i = 0; i < SKD_MAX_BARS; i++) | |
4946 | if (skdev->mem_map[i]) | |
4947 | iounmap(skdev->mem_map[i]); | |
4948 | ||
4949 | if (skdev->pcie_error_reporting_is_enabled) | |
4950 | pci_disable_pcie_error_reporting(pdev); | |
4951 | ||
4952 | skd_destruct(skdev); | |
4953 | ||
4954 | err_out_regions: | |
4955 | pci_release_regions(pdev); | |
4956 | ||
4957 | err_out: | |
4958 | pci_disable_device(pdev); | |
4959 | pci_set_drvdata(pdev, NULL); | |
4960 | return rc; | |
4961 | } | |
4962 | ||
4963 | static void skd_pci_remove(struct pci_dev *pdev) | |
4964 | { | |
4965 | int i; | |
4966 | struct skd_device *skdev; | |
4967 | ||
4968 | skdev = pci_get_drvdata(pdev); | |
4969 | if (!skdev) { | |
4970 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | |
4971 | return; | |
4972 | } | |
4973 | skd_stop_device(skdev); | |
4974 | skd_release_irq(skdev); | |
4975 | ||
4976 | for (i = 0; i < SKD_MAX_BARS; i++) | |
4977 | if (skdev->mem_map[i]) | |
4978 | iounmap((u32 *)skdev->mem_map[i]); | |
4979 | ||
4980 | if (skdev->pcie_error_reporting_is_enabled) | |
4981 | pci_disable_pcie_error_reporting(pdev); | |
4982 | ||
4983 | skd_destruct(skdev); | |
4984 | ||
4985 | pci_release_regions(pdev); | |
4986 | pci_disable_device(pdev); | |
4987 | pci_set_drvdata(pdev, NULL); | |
4988 | ||
4989 | return; | |
4990 | } | |
4991 | ||
4992 | static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |
4993 | { | |
4994 | int i; | |
4995 | struct skd_device *skdev; | |
4996 | ||
4997 | skdev = pci_get_drvdata(pdev); | |
4998 | if (!skdev) { | |
4999 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | |
5000 | return -EIO; | |
5001 | } | |
5002 | ||
5003 | skd_stop_device(skdev); | |
5004 | ||
5005 | skd_release_irq(skdev); | |
5006 | ||
5007 | for (i = 0; i < SKD_MAX_BARS; i++) | |
5008 | if (skdev->mem_map[i]) | |
5009 | iounmap((u32 *)skdev->mem_map[i]); | |
5010 | ||
5011 | if (skdev->pcie_error_reporting_is_enabled) | |
5012 | pci_disable_pcie_error_reporting(pdev); | |
5013 | ||
5014 | pci_release_regions(pdev); | |
5015 | pci_save_state(pdev); | |
5016 | pci_disable_device(pdev); | |
5017 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
5018 | return 0; | |
5019 | } | |
5020 | ||
5021 | static int skd_pci_resume(struct pci_dev *pdev) | |
5022 | { | |
5023 | int i; | |
5024 | int rc = 0; | |
5025 | struct skd_device *skdev; | |
5026 | ||
5027 | skdev = pci_get_drvdata(pdev); | |
5028 | if (!skdev) { | |
5029 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | |
5030 | return -1; | |
5031 | } | |
5032 | ||
5033 | pci_set_power_state(pdev, PCI_D0); | |
5034 | pci_enable_wake(pdev, PCI_D0, 0); | |
5035 | pci_restore_state(pdev); | |
5036 | ||
5037 | rc = pci_enable_device(pdev); | |
5038 | if (rc) | |
5039 | return rc; | |
5040 | rc = pci_request_regions(pdev, DRV_NAME); | |
5041 | if (rc) | |
5042 | goto err_out; | |
5043 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
5044 | if (!rc) { | |
5045 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | |
5046 | ||
5047 | pr_err("(%s): consistent DMA mask error %d\n", | |
5048 | pci_name(pdev), rc); | |
5049 | } | |
5050 | } else { | |
5051 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
5052 | if (rc) { | |
5053 | ||
5054 | pr_err("(%s): DMA mask error %d\n", | |
5055 | pci_name(pdev), rc); | |
5056 | goto err_out_regions; | |
5057 | } | |
5058 | } | |
5059 | ||
5060 | pci_set_master(pdev); | |
5061 | rc = pci_enable_pcie_error_reporting(pdev); | |
5062 | if (rc) { | |
5063 | pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", | |
5064 | skdev->name, rc); | |
5065 | skdev->pcie_error_reporting_is_enabled = 0; | |
5066 | } else | |
5067 | skdev->pcie_error_reporting_is_enabled = 1; | |
5068 | ||
5069 | for (i = 0; i < SKD_MAX_BARS; i++) { | |
5070 | ||
5071 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | |
5072 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | |
5073 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | |
5074 | skdev->mem_size[i]); | |
5075 | if (!skdev->mem_map[i]) { | |
5076 | pr_err("(%s): Unable to map adapter memory!\n", | |
5077 | skd_name(skdev)); | |
5078 | rc = -ENODEV; | |
5079 | goto err_out_iounmap; | |
5080 | } | |
2e44b427 | 5081 | pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", |
5082 | skdev->name, __func__, __LINE__, | |
5083 | skdev->mem_map[i], | |
5084 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | |
e67f86b3 AB |
5085 | } |
5086 | rc = skd_acquire_irq(skdev); | |
5087 | if (rc) { | |
5088 | ||
5089 | pr_err("(%s): interrupt resource error %d\n", | |
5090 | pci_name(pdev), rc); | |
5091 | goto err_out_iounmap; | |
5092 | } | |
5093 | ||
5094 | rc = skd_start_timer(skdev); | |
5095 | if (rc) | |
5096 | goto err_out_timer; | |
5097 | ||
5098 | init_waitqueue_head(&skdev->waitq); | |
5099 | ||
5100 | skd_start_device(skdev); | |
5101 | ||
5102 | return rc; | |
5103 | ||
5104 | err_out_timer: | |
5105 | skd_stop_device(skdev); | |
5106 | skd_release_irq(skdev); | |
5107 | ||
5108 | err_out_iounmap: | |
5109 | for (i = 0; i < SKD_MAX_BARS; i++) | |
5110 | if (skdev->mem_map[i]) | |
5111 | iounmap(skdev->mem_map[i]); | |
5112 | ||
5113 | if (skdev->pcie_error_reporting_is_enabled) | |
5114 | pci_disable_pcie_error_reporting(pdev); | |
5115 | ||
5116 | err_out_regions: | |
5117 | pci_release_regions(pdev); | |
5118 | ||
5119 | err_out: | |
5120 | pci_disable_device(pdev); | |
5121 | return rc; | |
5122 | } | |
5123 | ||
5124 | static void skd_pci_shutdown(struct pci_dev *pdev) | |
5125 | { | |
5126 | struct skd_device *skdev; | |
5127 | ||
5128 | pr_err("skd_pci_shutdown called\n"); | |
5129 | ||
5130 | skdev = pci_get_drvdata(pdev); | |
5131 | if (!skdev) { | |
5132 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | |
5133 | return; | |
5134 | } | |
5135 | ||
5136 | pr_err("%s: calling stop\n", skd_name(skdev)); | |
5137 | skd_stop_device(skdev); | |
5138 | } | |
5139 | ||
5140 | static struct pci_driver skd_driver = { | |
5141 | .name = DRV_NAME, | |
5142 | .id_table = skd_pci_tbl, | |
5143 | .probe = skd_pci_probe, | |
5144 | .remove = skd_pci_remove, | |
5145 | .suspend = skd_pci_suspend, | |
5146 | .resume = skd_pci_resume, | |
5147 | .shutdown = skd_pci_shutdown, | |
5148 | }; | |
5149 | ||
5150 | /* | |
5151 | ***************************************************************************** | |
5152 | * LOGGING SUPPORT | |
5153 | ***************************************************************************** | |
5154 | */ | |
5155 | ||
5156 | static const char *skd_name(struct skd_device *skdev) | |
5157 | { | |
5158 | memset(skdev->id_str, 0, sizeof(skdev->id_str)); | |
5159 | ||
5160 | if (skdev->inquiry_is_valid) | |
5161 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", | |
5162 | skdev->name, skdev->inq_serial_num, | |
5163 | pci_name(skdev->pdev)); | |
5164 | else | |
5165 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", | |
5166 | skdev->name, pci_name(skdev->pdev)); | |
5167 | ||
5168 | return skdev->id_str; | |
5169 | } | |
5170 | ||
5171 | const char *skd_drive_state_to_str(int state) | |
5172 | { | |
5173 | switch (state) { | |
5174 | case FIT_SR_DRIVE_OFFLINE: | |
5175 | return "OFFLINE"; | |
5176 | case FIT_SR_DRIVE_INIT: | |
5177 | return "INIT"; | |
5178 | case FIT_SR_DRIVE_ONLINE: | |
5179 | return "ONLINE"; | |
5180 | case FIT_SR_DRIVE_BUSY: | |
5181 | return "BUSY"; | |
5182 | case FIT_SR_DRIVE_FAULT: | |
5183 | return "FAULT"; | |
5184 | case FIT_SR_DRIVE_DEGRADED: | |
5185 | return "DEGRADED"; | |
5186 | case FIT_SR_PCIE_LINK_DOWN: | |
5187 | return "INK_DOWN"; | |
5188 | case FIT_SR_DRIVE_SOFT_RESET: | |
5189 | return "SOFT_RESET"; | |
5190 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | |
5191 | return "NEED_FW"; | |
5192 | case FIT_SR_DRIVE_INIT_FAULT: | |
5193 | return "INIT_FAULT"; | |
5194 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
5195 | return "BUSY_SANITIZE"; | |
5196 | case FIT_SR_DRIVE_BUSY_ERASE: | |
5197 | return "BUSY_ERASE"; | |
5198 | case FIT_SR_DRIVE_FW_BOOTING: | |
5199 | return "FW_BOOTING"; | |
5200 | default: | |
5201 | return "???"; | |
5202 | } | |
5203 | } | |
5204 | ||
5205 | const char *skd_skdev_state_to_str(enum skd_drvr_state state) | |
5206 | { | |
5207 | switch (state) { | |
5208 | case SKD_DRVR_STATE_LOAD: | |
5209 | return "LOAD"; | |
5210 | case SKD_DRVR_STATE_IDLE: | |
5211 | return "IDLE"; | |
5212 | case SKD_DRVR_STATE_BUSY: | |
5213 | return "BUSY"; | |
5214 | case SKD_DRVR_STATE_STARTING: | |
5215 | return "STARTING"; | |
5216 | case SKD_DRVR_STATE_ONLINE: | |
5217 | return "ONLINE"; | |
5218 | case SKD_DRVR_STATE_PAUSING: | |
5219 | return "PAUSING"; | |
5220 | case SKD_DRVR_STATE_PAUSED: | |
5221 | return "PAUSED"; | |
5222 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
5223 | return "DRAINING_TIMEOUT"; | |
5224 | case SKD_DRVR_STATE_RESTARTING: | |
5225 | return "RESTARTING"; | |
5226 | case SKD_DRVR_STATE_RESUMING: | |
5227 | return "RESUMING"; | |
5228 | case SKD_DRVR_STATE_STOPPING: | |
5229 | return "STOPPING"; | |
5230 | case SKD_DRVR_STATE_SYNCING: | |
5231 | return "SYNCING"; | |
5232 | case SKD_DRVR_STATE_FAULT: | |
5233 | return "FAULT"; | |
5234 | case SKD_DRVR_STATE_DISAPPEARED: | |
5235 | return "DISAPPEARED"; | |
5236 | case SKD_DRVR_STATE_BUSY_ERASE: | |
5237 | return "BUSY_ERASE"; | |
5238 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
5239 | return "BUSY_SANITIZE"; | |
5240 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
5241 | return "BUSY_IMMINENT"; | |
5242 | case SKD_DRVR_STATE_WAIT_BOOT: | |
5243 | return "WAIT_BOOT"; | |
5244 | ||
5245 | default: | |
5246 | return "???"; | |
5247 | } | |
5248 | } | |
5249 | ||
a26ba7fa | 5250 | static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) |
e67f86b3 AB |
5251 | { |
5252 | switch (state) { | |
5253 | case SKD_MSG_STATE_IDLE: | |
5254 | return "IDLE"; | |
5255 | case SKD_MSG_STATE_BUSY: | |
5256 | return "BUSY"; | |
5257 | default: | |
5258 | return "???"; | |
5259 | } | |
5260 | } | |
5261 | ||
a26ba7fa | 5262 | static const char *skd_skreq_state_to_str(enum skd_req_state state) |
e67f86b3 AB |
5263 | { |
5264 | switch (state) { | |
5265 | case SKD_REQ_STATE_IDLE: | |
5266 | return "IDLE"; | |
5267 | case SKD_REQ_STATE_SETUP: | |
5268 | return "SETUP"; | |
5269 | case SKD_REQ_STATE_BUSY: | |
5270 | return "BUSY"; | |
5271 | case SKD_REQ_STATE_COMPLETED: | |
5272 | return "COMPLETED"; | |
5273 | case SKD_REQ_STATE_TIMEOUT: | |
5274 | return "TIMEOUT"; | |
5275 | case SKD_REQ_STATE_ABORTED: | |
5276 | return "ABORTED"; | |
5277 | default: | |
5278 | return "???"; | |
5279 | } | |
5280 | } | |
5281 | ||
5282 | static void skd_log_skdev(struct skd_device *skdev, const char *event) | |
5283 | { | |
2e44b427 | 5284 | pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", |
5285 | skdev->name, __func__, __LINE__, skdev->name, skdev, event); | |
5286 | pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", | |
5287 | skdev->name, __func__, __LINE__, | |
5288 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | |
5289 | skd_skdev_state_to_str(skdev->state), skdev->state); | |
5290 | pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", | |
5291 | skdev->name, __func__, __LINE__, | |
5292 | skdev->in_flight, skdev->cur_max_queue_depth, | |
5293 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | |
5294 | pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", | |
5295 | skdev->name, __func__, __LINE__, | |
5296 | skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); | |
e67f86b3 AB |
5297 | } |
5298 | ||
5299 | static void skd_log_skmsg(struct skd_device *skdev, | |
5300 | struct skd_fitmsg_context *skmsg, const char *event) | |
5301 | { | |
2e44b427 | 5302 | pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", |
5303 | skdev->name, __func__, __LINE__, skdev->name, skmsg, event); | |
5304 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", | |
5305 | skdev->name, __func__, __LINE__, | |
5306 | skd_skmsg_state_to_str(skmsg->state), skmsg->state, | |
5307 | skmsg->id, skmsg->length); | |
e67f86b3 AB |
5308 | } |
5309 | ||
5310 | static void skd_log_skreq(struct skd_device *skdev, | |
5311 | struct skd_request_context *skreq, const char *event) | |
5312 | { | |
2e44b427 | 5313 | pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", |
5314 | skdev->name, __func__, __LINE__, skdev->name, skreq, event); | |
5315 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", | |
5316 | skdev->name, __func__, __LINE__, | |
5317 | skd_skreq_state_to_str(skreq->state), skreq->state, | |
5318 | skreq->id, skreq->fitmsg_id); | |
5319 | pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", | |
5320 | skdev->name, __func__, __LINE__, | |
5321 | skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); | |
e67f86b3 | 5322 | |
fcd37eb3 JA |
5323 | if (skreq->req != NULL) { |
5324 | struct request *req = skreq->req; | |
5325 | u32 lba = (u32)blk_rq_pos(req); | |
5326 | u32 count = blk_rq_sectors(req); | |
e67f86b3 | 5327 | |
fcd37eb3 JA |
5328 | pr_debug("%s:%s:%d " |
5329 | "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", | |
5330 | skdev->name, __func__, __LINE__, | |
5331 | req, lba, lba, count, count, | |
5332 | (int)rq_data_dir(req)); | |
5333 | } else | |
5334 | pr_debug("%s:%s:%d req=NULL\n", | |
5335 | skdev->name, __func__, __LINE__); | |
e67f86b3 AB |
5336 | } |
5337 | ||
5338 | /* | |
5339 | ***************************************************************************** | |
5340 | * MODULE GLUE | |
5341 | ***************************************************************************** | |
5342 | */ | |
5343 | ||
5344 | static int __init skd_init(void) | |
5345 | { | |
e67f86b3 AB |
5346 | pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); |
5347 | ||
5348 | switch (skd_isr_type) { | |
5349 | case SKD_IRQ_LEGACY: | |
5350 | case SKD_IRQ_MSI: | |
5351 | case SKD_IRQ_MSIX: | |
5352 | break; | |
5353 | default: | |
fbed149a | 5354 | pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", |
e67f86b3 AB |
5355 | skd_isr_type, SKD_IRQ_DEFAULT); |
5356 | skd_isr_type = SKD_IRQ_DEFAULT; | |
5357 | } | |
5358 | ||
fbed149a BZ |
5359 | if (skd_max_queue_depth < 1 || |
5360 | skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { | |
5361 | pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", | |
e67f86b3 AB |
5362 | skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); |
5363 | skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | |
5364 | } | |
5365 | ||
5366 | if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { | |
fbed149a | 5367 | pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", |
e67f86b3 AB |
5368 | skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); |
5369 | skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | |
5370 | } | |
5371 | ||
5372 | if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { | |
fbed149a | 5373 | pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", |
e67f86b3 AB |
5374 | skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); |
5375 | skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | |
5376 | } | |
5377 | ||
5378 | if (skd_dbg_level < 0 || skd_dbg_level > 2) { | |
fbed149a | 5379 | pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", |
e67f86b3 AB |
5380 | skd_dbg_level, 0); |
5381 | skd_dbg_level = 0; | |
5382 | } | |
5383 | ||
5384 | if (skd_isr_comp_limit < 0) { | |
fbed149a | 5385 | pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", |
e67f86b3 AB |
5386 | skd_isr_comp_limit, 0); |
5387 | skd_isr_comp_limit = 0; | |
5388 | } | |
5389 | ||
5390 | if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { | |
fbed149a | 5391 | pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", |
e67f86b3 AB |
5392 | skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); |
5393 | skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | |
5394 | } | |
5395 | ||
b8df6647 | 5396 | return pci_register_driver(&skd_driver); |
e67f86b3 AB |
5397 | } |
5398 | ||
5399 | static void __exit skd_exit(void) | |
5400 | { | |
5401 | pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); | |
5402 | ||
e67f86b3 | 5403 | pci_unregister_driver(&skd_driver); |
b8df6647 BZ |
5404 | |
5405 | if (skd_major) | |
5406 | unregister_blkdev(skd_major, DRV_NAME); | |
e67f86b3 AB |
5407 | } |
5408 | ||
e67f86b3 AB |
5409 | module_init(skd_init); |
5410 | module_exit(skd_exit); |