/*
 * Driver for the Micron P320 SSD
 *   Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 *    Copyright (C) 2009 Integrated Device Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)

/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ		0x100
#define AHCI_RX_FIS_OFFSET	0x0
#define AHCI_IDFY_SZ		ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET	0x400
#define AHCI_SECTBUF_SZ		ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET	0x800
#define AHCI_SMARTBUF_SZ	ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET	0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ	4096

/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ	sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ		(MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET	0x0

/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ	0x80
#define AHCI_CMD_TBL_HDR_OFFSET	0x0
#define AHCI_CMD_TBL_SGL_SZ	(MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET	AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ	(AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)


#define HOST_CAP_NZDMA		(1 << 19)
#define HOST_HSORG		0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
#define HSORG_HWREV		0xFF00
#define HSORG_STYLE		0x8
#define HSORG_SLOTGROUPS	0x7

#define PORT_COMMAND_ISSUE	0x38
#define PORT_SDBV		0x7C

#define PORT_OFFSET		0x100
#define PORT_MEM_SIZE		0x80

#define PORT_IRQ_ERR \
	(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
	 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
	 PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
	(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
	(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
	 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
	(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)

/* product numbers */
#define MTIP_PRODUCT_UNKNOWN	0x00
#define MTIP_PRODUCT_ASICFPGA	0x11

/* Device instance number, incremented each time a device is probed. */
static int instance;

static struct list_head online_list;
static struct list_head removing_list;
static spinlock_t dev_lock;

/*
 * Global variable used to hold the major block device number
 * allocated in mtip_init().
 */
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;

static u32 cpu_use[NR_CPUS];

static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
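/* 32-bit layout of ide_task_request_t, used by the compat ioctl path */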
struct mtip_compat_ide_task_request_s {
	__u8		io_ports[8];
	__u8		hob_ports[8];
	ide_reg_valid_t	out_flags;
	ide_reg_valid_t	in_flags;
	int		data_phase;
	int		req_cmd;
	compat_ulong_t	out_size;
	compat_ulong_t	in_size;
};
#endif

/*
 * Check whether the card has been surprise-removed from the system by
 * reading the vendor ID from PCI configuration space.
 *
 * @pdev Pointer to the pci_dev structure.
 *
 * return value
 *	true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
	u16 vendor_id = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd->sr)
		return true;

	/* Read the vendorID from the configuration space */
	pci_read_config_word(pdev, 0x00, &vendor_id);
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->queue)
			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
		else
			dev_warn(&dd->pdev->dev,
				"%s: dd->queue is NULL\n", __func__);
		return true; /* device removed */
	}

	return false; /* device present */
}

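/*
 * Obtain a command slot for a driver-generated internal command by
 * allocating one of the block layer's reserved requests.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	Pointer to the command, or NULL if the device was removed or no
 *	reserved request could be allocated.
 */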
static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
	struct request *rq;

	if (mtip_check_surprise_removal(dd->pdev))
		return NULL;

	rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return NULL;

	return blk_mq_rq_to_pdu(rq);
}

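/* Release the reserved block layer request backing an internal command. */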
static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
{
	blk_put_request(blk_mq_rq_from_pdu(cmd));
}

/*
 * Once we add support for one hctx per mtip group, this will change a bit
 */
static struct request *mtip_rq_from_tag(struct driver_data *dd,
					unsigned int tag)
{
	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

	return blk_mq_tag_to_rq(hctx->tags, tag);
}

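/* Look up the driver private data (struct mtip_cmd) for a given tag. */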
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
					unsigned int tag)
{
	struct request *rq = mtip_rq_from_tag(dd, tag);

	return blk_mq_rq_to_pdu(rq);
}

/*
 * IO completion function.
 *
 * This completion function is called by the driver ISR when a
 * command that was issued by the kernel completes. It first calls the
 * asynchronous completion function which normally calls back into the block
 * layer passing the asynchronous callback data, then unmaps the
 * scatter list associated with the completed command, and finally
 * clears the allocated bit associated with the completed command.
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command.
 * @cmd    Pointer to the command structure.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_async_complete(struct mtip_port *port,
				int tag, struct mtip_cmd *cmd, int status)
{
	struct driver_data *dd = port->dd;
	struct request *rq;

	if (unlikely(!dd) || unlikely(!port))
		return;

	if (unlikely(status == PORT_IRQ_TF_ERR)) {
		dev_warn(&port->dd->pdev->dev,
			"Command tag %d failed due to TFE\n", tag);
	}

	rq = mtip_rq_from_tag(dd, tag);

	blk_mq_complete_request(rq, status);
}

/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
	 * is 1 sec but in LUN failure conditions, up to 10 secs are required
	 */
	timeout = jiffies + msecs_to_jiffies(10000);
	do {
		mdelay(10);
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}

/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable/disable the DMA engine */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;
	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		writel((port->command_list_dma >> 16) >> 16,
			 port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			 port->mmio + PORT_FIS_ADDR_HI);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
			port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
					port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}

/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) |
			 1, port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
			 port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
			 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	mtip_init_port(port);
	mtip_start_port(port);

}

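/*
 * Reset the HBA, then reinitialize and restart the port.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success, -EFAULT if the HBA reset failed.
 */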
static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd->pdev))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
			char *msg,
			unsigned long *tagbits,
			int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
						tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

/*
 * Internal command completion callback function.
 *
 * This function is normally called by the driver ISR when an internal
 * command completed. This function signals the command completion by
 * calling complete().
 *
 * @port    Pointer to the port data structure.
 * @tag     Tag of the command that has completed.
 * @command Pointer to the command; its comp_data member holds the
 *          completion structure to signal.
 * @status  Completion status.
 *
 * return value
 *	None
 */
static void mtip_completion(struct mtip_port *port,
			    int tag, struct mtip_cmd *command, int status)
{
	struct completion *waiting = command->comp_data;
	if (unlikely(status == PORT_IRQ_TF_ERR))
		dev_warn(&port->dd->pdev->dev,
			"Internal command %d completed with TFE\n", tag);

	command->comp_func = NULL;
	command->comp_data = NULL;
	complete(waiting);
}

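/* No-op completion, used when an internal command is polled for completion. */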
static void mtip_null_completion(struct mtip_port *port,
			    int tag, struct mtip_cmd *command, int status)
{
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib);
/*
 * Handle an error.
 *
 * @dd Pointer to the driver_data structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");

		if (cmd->comp_data && cmd->comp_func) {
			cmd->comp_func(port, MTIP_TAG_INTERNAL,
					cmd, PORT_IRQ_TF_ERR);
		}
		return;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = mtip_cmd_from_tag(dd, tag);
			if (likely(cmd->comp_func)) {
				set_bit(tag, tagaccum);
				cmd_cnt++;
				cmd->comp_func(port, tag, cmd, 0);
			} else {
				dev_err(&port->dd->pdev->dev,
					"Missing completion func for tag %d",
					tag);
				if (mtip_check_surprise_removal(dd->pdev)) {
					/* don't proceed further */
					return;
				}
			}
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = mtip_cmd_from_tag(dd, tag);

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
					" Fail: %s w/tag %d [%s].\n",
					fis->command == ATA_CMD_FPDMA_WRITE ?
						"write" : "read",
					tag,
					fail_reason != NULL ?
						fail_reason : "unknown");
					if (cmd->comp_func) {
						cmd->comp_func(port, tag,
							cmd, -ENODATA);
					}
					continue;
				}
			}

			/*
			 * First check if this command has
			 * exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);

			if (cmd->comp_func)
				cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
			else
				dev_warn(&port->dd->pdev->dev,
					"Bad completion for tag %d\n",
					tag);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
}

/*
 * Handle a set device bits interrupt
 */
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
							u32 completed)
{
	struct driver_data *dd = port->dd;
	int tag, bit;
	struct mtip_cmd *command;

	if (!completed) {
		WARN_ON_ONCE(!completed);
		return;
	}
	/* clear completed status register in the hardware.*/
	writel(completed, port->completed[group]);

	/* Process completed commands. */
	for (bit = 0; (bit < 32) && completed; bit++) {
		if (completed & 0x01) {
			tag = (group << 5) | bit;

			/* skip internal command slot. */
			if (unlikely(tag == MTIP_TAG_INTERNAL))
				continue;

			command = mtip_cmd_from_tag(dd, tag);
			if (likely(command->comp_func))
				command->comp_func(port, tag, command, 0);
			else {
				dev_dbg(&dd->pdev->dev,
					"Null completion for tag %d",
					tag);

				if (mtip_check_surprise_removal(
						dd->pdev)) {
					return;
				}
			}
		}
		completed >>= 1;
	}

	/* If last, re-enable interrupts */
	if (atomic_dec_return(&dd->irq_workers_active) == 0)
		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
		& (1 << MTIP_TAG_INTERNAL))) {
		if (cmd->comp_func) {
			cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
			return;
		}
	}

	return;
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{

	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
		if (mtip_check_surprise_removal(dd->pdev))
			return;
	}
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
	}
}

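/*
 * Demux an HBA interrupt: acknowledge the port interrupt status, hand
 * SDB FIS completions off to the per-group workers, and handle error
 * and legacy (PIO/D2H) interrupts inline.
 */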
static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;
	int do_irq_enable = 1, i, workers;
	struct mtip_work *twork;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port.*/
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		if (unlikely(port_stat == 0xFFFFFFFF)) {
			mtip_check_surprise_removal(dd->pdev);
			return IRQ_HANDLED;
		}
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
			do_irq_enable = 0;
			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
			/* Tally groups with completions; group zero is handled locally below */
			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
									i++) {
				twork = &dd->work[i];
				twork->completed = readl(port->completed[i]);
				if (twork->completed)
					workers++;
			}

			atomic_set(&dd->irq_workers_active, workers);
			if (workers) {
				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
					twork = &dd->work[i];
					if (twork->completed)
						queue_work_on(
							twork->cpu_binding,
							dd->isr_workq,
							&twork->work);
				}

				if (likely(dd->work[0].completed))
					mtip_workq_sdbfx(port, 0,
							dd->work[0].completed);

			} else {
				/*
				 * Chip quirk: SDB interrupt but nothing
				 * to complete
				 */
				do_irq_enable = 1;
			}
		}

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	if (unlikely(do_irq_enable))
		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * HBA interrupt subroutine.
 *
 * @irq      IRQ number.
 * @instance Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED A HBA interrupt was pending and handled.
 *	IRQ_NONE    This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;

	return mtip_handle_irq(dd);
}

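/* Issue a non-NCQ command: only the Command Issue bit is set (no SActive). */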
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	writel(1 << MTIP_TAG_BIT(tag),
		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

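/*
 * Decide whether NCQ processing must be paused after the given internal
 * command (e.g. secure erase prep or firmware download) has been issued.
 *
 * return value
 *	true if the caller should hold off further NCQ commands
 */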
static bool mtip_pause_ncq(struct mtip_port *port,
				struct host_to_dev_fis *fis)
{
	struct host_to_dev_fis *reply;
	unsigned long task_file_data;

	reply = port->rxfis + RX_FIS_D2H_REG;
	task_file_data = readl(port->mmio+PORT_TFDATA);

	if ((task_file_data & 1))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
					(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		return false;
	}

	return false;
}

/*
 * Wait for port to quiesce
 *
 * @port    Pointer to port data structure
 * @timeout Max duration to wait (ms)
 * @atomic  gfp_t flag to indicate blockable context or not
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
								gfp_t atomic)
{
	unsigned long to;
	unsigned int n;
	unsigned int active = 1;

	blk_mq_stop_hw_queues(port->dd->queue);

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
			atomic == GFP_KERNEL) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}

		if (atomic == GFP_KERNEL)
			msleep(100);
		else {
			cpu_relax();
			udelay(100);
		}

		if (mtip_check_surprise_removal(port->dd->pdev))
			goto err_fault;

		/*
		 * Ignore s_active bit 0 of array element 0.
		 * This bit will always be set
		 */
		active = readl(port->s_active[0]) & 0xFFFFFFFE;
		for (n = 1; n < port->dd->slot_groups; n++)
			active |= readl(port->s_active[n]);

		if (!active)
			break;
	} while (time_before(jiffies, to));

	blk_mq_start_stopped_hw_queues(port->dd->queue, true);
	return active ? -EBUSY : 0;
err_fault:
	blk_mq_start_stopped_hw_queues(port->dd->queue, true);
	return -EFAULT;
}

/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	The buffer address is not correctly aligned.
 *	-EBUSY	Internal command or other IO in progress.
 *	-EAGAIN	Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					gfp_t atomic,
					unsigned long timeout)
{
	struct mtip_cmd_sg *command_sg;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct mtip_cmd *int_cmd;
	struct driver_data *dd = port->dd;
	int rv = 0;
	unsigned long start;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	int_cmd = mtip_get_int_command(dd);
	if (!int_cmd) {
		dbg_printk(MTIP_DRV_NAME " Unable to allocate tag for PIO cmd\n");
		return -EFAULT;
	}

	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);

	if (fis->command == ATA_CMD_SEC_ERASE_PREP)
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);

	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (atomic == GFP_KERNEL) {
		if (fis->command != ATA_CMD_STANDBYNOW1) {
			/* wait for io to complete if non atomic */
			if (mtip_quiesce_io(port,
					MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
				dev_warn(&dd->pdev->dev,
					"Failed to quiesce IO\n");
				mtip_put_int_command(dd, int_cmd);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
				return -EBUSY;
			}
		}

		/* Set the completion function and data for the command. */
		int_cmd->comp_data = &wait;
		int_cmd->comp_func = mtip_completion;

	} else {
		/* Clear completion - we're going to poll */
		int_cmd->comp_data = NULL;
		int_cmd->comp_func = mtip_null_completion;
	}

	/* Copy the command to the command table */
	memcpy(int_cmd->command, fis, fis_len*4);

	/* Populate the SG list */
	int_cmd->command_header->opts =
		 __force_bit2int cpu_to_le32(opts | fis_len);
	if (buf_len) {
		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info =
			__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
		command_sg->dba	=
			 __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
		command_sg->dba_upper =
			__force_bit2int cpu_to_le32((buffer >> 16) >> 16);

		int_cmd->command_header->opts |=
			__force_bit2int cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	int_cmd->command_header->byte_count = 0;

	start = jiffies;

	/* Issue the command to the hardware */
	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

	if (atomic == GFP_KERNEL) {
		/* Wait for the command to complete or timeout. */
		if ((rv = wait_for_completion_interruptible_timeout(
				&wait,
				msecs_to_jiffies(timeout))) <= 0) {

			if (rv == -ERESTARTSYS) { /* interrupted */
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] was interrupted after %u ms\n",
					fis->command,
					jiffies_to_msecs(jiffies - start));
				rv = -EINTR;
				goto exec_ic_exit;
			} else if (rv == 0) /* timeout */
				dev_err(&dd->pdev->dev,
					"Internal command did not complete [%02X] within timeout of %lu ms\n",
					fis->command, timeout);
			else
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
					fis->command, rv, timeout);

			if (mtip_check_surprise_removal(dd->pdev) ||
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned due to SR\n",
					fis->command);
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			mtip_device_reset(dd); /* recover from timeout issue */
			rv = -EAGAIN;
			goto exec_ic_exit;
		}
	} else {
		u32 hba_stat, port_stat;

		/* Spin for <timeout> checking if command still outstanding */
		timeout = jiffies + msecs_to_jiffies(timeout);
		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
				& (1 << MTIP_TAG_INTERNAL))
				&& time_before(jiffies, timeout)) {
			if (mtip_check_surprise_removal(dd->pdev)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			if ((fis->command != ATA_CMD_STANDBYNOW1) &&
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			port_stat = readl(port->mmio + PORT_IRQ_STAT);
			if (!port_stat)
				continue;

			if (port_stat & PORT_IRQ_ERR) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] failed\n",
					fis->command);
				mtip_device_reset(dd);
				rv = -EIO;
				goto exec_ic_exit;
			} else {
				writel(port_stat, port->mmio + PORT_IRQ_STAT);
				hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
				if (hba_stat)
					writel(hba_stat,
						dd->mmio + HOST_IRQ_STAT);
			}
			break;
		}
	}

	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	mtip_put_int_command(dd, int_cmd);
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	wake_up_interruptible(&port->svc_wait);

	return rv;
}

/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;
	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}

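/*
 * Select a timeout (in ms) appropriate for the given command; secure
 * erase timeouts are derived from identify data words 89/90.
 */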
static void mtip_set_timeout(struct driver_data *dd,
					struct host_to_dev_fis *fis,
					unsigned int *timeout, u8 erasemode)
{
	switch (fis->command) {
	case ATA_CMD_DOWNLOAD_MICRO:
		*timeout = 120000; /* 2 minutes */
		break;
	case ATA_CMD_SEC_ERASE_UNIT:
	case 0xFC:
		if (erasemode)
			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
		else
			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
		break;
	case ATA_CMD_STANDBYNOW1:
		*timeout = 120000; /* 2 minutes */
		break;
	case 0xF7:
	case 0xFA:
		*timeout = 60000; /* 60 seconds */
		break;
	case ATA_CMD_SMART:
		*timeout = 15000; /* 15 seconds */
		break;
	default:
		*timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
		break;
	}
}

/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	An error occurred while copying data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				GFP_KERNEL,
				MTIP_INT_CMD_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
	/* Demux ID.DRAT & ID.RZAT to determine trim support */
	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
		port->dd->trim_supp = true;
	else
#endif
		port->dd->trim_supp = false;

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}

/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis fis;
	unsigned long start;
	unsigned int timeout;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	mtip_set_timeout(port->dd, &fis, &timeout, 0);

	start = jiffies;
	rv = mtip_exec_internal_command(port,
					&fis,
					5,
					0,
					0,
					0,
					GFP_ATOMIC,
					timeout);
	dbg_printk(MTIP_DRV_NAME " Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port       pointer to the port structure.
 * @page       page number to fetch
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 * @sectors    page length to fetch, in sectors
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_READ_LOG_EXT;
	fis.sect_count	= sectors & 0xFF;
	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
	fis.lba_low	= page;
	fis.lba_mid	= 0;
	fis.device	= ATA_DEVICE_OBS;

	memset(buffer, 0, sectors * ATA_SECT_SIZE);

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					sectors * ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					MTIP_INT_CMD_TIMEOUT_MS);
}

/*
 * Issue a SMART READ DATA command to the device.
 *
 * @port       pointer to the port structure.
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
					dma_addr_t buffer_dma)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_SMART;
	fis.features	= 0xD0;
	fis.sect_count	= 1;
	fis.lba_mid	= 0x4F;
	fis.lba_hi	= 0xC2;
	fis.device	= ATA_DEVICE_OBS;

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					15000);
}

/*
 * Get the value of a smart attribute
 *
 * @port   pointer to the port structure
 * @id     attribute number
 * @attrib pointer to return attrib information corresponding to @id
 *
 * return value
 *	-EINVAL	NULL buffer passed or unsupported attribute @id.
 *	-EPERM	Identify data not valid, SMART not supported or not enabled
 */
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib)
{
	int rv, i;
	struct smart_attr *pattr;

	if (!attrib)
		return -EINVAL;

	if (!port->identify_valid) {
		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
		return -EPERM;
	}
	if (!(port->identify[82] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
		return -EPERM;
	}
	if (!(port->identify[85] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
		return -EPERM;
	}

	memset(port->smart_buf, 0, ATA_SECT_SIZE);
	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
	if (rv) {
		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
		return rv;
	}

	pattr = (struct smart_attr *)(port->smart_buf + 2);
	for (i = 0; i < 29; i++, pattr++)
		if (pattr->attr_id == id) {
			memcpy(attrib, pattr, sizeof(struct smart_attr));
			break;
		}

	if (i == 29) {
		dev_warn(&port->dd->pdev->dev,
			"Query for invalid SMART attribute ID\n");
		rv = -EINVAL;
	}

	return rv;
}

/*
 * Trim unused sectors
 *
 * @dd  pointer to driver_data structure
 * @lba starting lba
 * @len # of 512b sectors to trim
 *
 * return value
 *	-ENOMEM	Out of dma memory
 *	-EINVAL	Invalid parameters passed in, trim not supported
 *	-EIO	Error submitting trim request to hw
 */
static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
				unsigned int len)
{
	int i, rv = 0;
	u64 tlba, tlen, sect_left;
	struct mtip_trim_entry *buf;
	dma_addr_t dma_addr;
	struct host_to_dev_fis fis;

	if (!len || dd->trim_supp == false)
		return -EINVAL;

	/* Trim request too big */
	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));

	/* Trim request not aligned on 4k boundary */
	WARN_ON(len % 8 != 0);

	/* Warn if vu_trim structure is too big */
	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);

	/* Allocate a DMA buffer for the trim structure */
	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
								GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, ATA_SECT_SIZE);

	for (i = 0, sect_left = len, tlba = lba;
			i < MTIP_MAX_TRIM_ENTRIES && sect_left;
			i++) {
		tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
					MTIP_MAX_TRIM_ENTRY_LEN :
					sect_left);
		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
		buf[i].range = __force_bit2int cpu_to_le16(tlen);
		tlba += tlen;
		sect_left -= tlen;
	}
	WARN_ON(sect_left != 0);

	/* Build the fis */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type       = 0x27;
	fis.opts       = 1 << 7;
	fis.command    = 0xfb;
	fis.features   = 0x60;
	fis.sect_count = 1;
	fis.device     = ATA_DEVICE_OBS;

	if (mtip_exec_internal_command(dd->port,
					&fis,
					5,
					dma_addr,
					ATA_SECT_SIZE,
					0,
					GFP_KERNEL,
					MTIP_TRIM_TIMEOUT_MS) < 0)
		rv = -EIO;

	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
	return rv;
}

/*
 * Get the drive capacity.
 *
 * @dd      Pointer to the device data structure.
 * @sectors Pointer to the variable that will receive the sector count.
 *
 * return value
 *	1 Capacity was returned successfully.
 *	0 The identify information is invalid.
 */
static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{
	struct mtip_port *port = dd->port;
	u64 total, raw0, raw1, raw2, raw3;
	raw0 = port->identify[100];
	raw1 = port->identify[101];
	raw2 = port->identify[102];
	raw3 = port->identify[103];
	total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
	*sectors = total;
	return (bool) !!port->identify_valid;
}

/*
 * Display the identify command data.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_dump_identify(struct mtip_port *port)
{
	sector_t sectors;
	unsigned short revid;
	char cbuf[42];

	if (!port->identify_valid)
		return;

	strlcpy(cbuf, (char *)(port->identify+10), 21);
	dev_info(&port->dd->pdev->dev,
		"Serial No.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify+23), 9);
	dev_info(&port->dd->pdev->dev,
		"Firmware Ver.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify+27), 41);
	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);

	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
		port->identify[128],
		port->identify[128] & 0x4 ? "(LOCKED)" : "");

	if (mtip_hw_get_capacity(port->dd, &sectors))
		dev_info(&port->dd->pdev->dev,
			"Capacity: %llu sectors (%llu MB)\n",
			(u64)sectors,
			((u64)sectors) * ATA_SECT_SIZE >> 20);

	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
	switch (revid & 0xFF) {
	case 0x1:
		strlcpy(cbuf, "A0", 3);
		break;
	case 0x3:
		strlcpy(cbuf, "A2", 3);
		break;
	default:
		strlcpy(cbuf, "?", 2);
		break;
	}
	dev_info(&port->dd->pdev->dev,
		"Card Type: %s\n", cbuf);
}

/*
 * Map the commands scatter list into the command table.
 *
 * @command Pointer to the command.
 * @nents   Number of scatter list entries.
 *
 * return value
 *	None
 */
static inline void fill_command_sg(struct driver_data *dd,
				struct mtip_cmd *command,
				int nents)
{
	int n;
	unsigned int dma_len;
	struct mtip_cmd_sg *command_sg;
	struct scatterlist *sg = command->sg;

	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;

	for (n = 0; n < nents; n++) {
		dma_len = sg_dma_len(sg);
		if (dma_len > 0x400000)
			dev_err(&dd->pdev->dev,
				"DMA segment length truncated\n");
		command_sg->info = __force_bit2int
			cpu_to_le32((dma_len-1) & 0x3FFFFF);
		command_sg->dba	= __force_bit2int
			cpu_to_le32(sg_dma_address(sg));
		command_sg->dba_upper = __force_bit2int
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		command_sg++;
		sg++;
	}
}

/*
 * @brief Execute a drive command.
 *
 * return value 0 The command completed successfully.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_task(struct mtip_port *port, u8 *command)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
	unsigned int to;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[1];
	fis.sect_count	= command[2];
	fis.sector	= command[3];
	fis.cyl_low	= command[4];
	fis.cyl_hi	= command[5];
	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/

	mtip_set_timeout(port->dd, &fis, &to, 0);

	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3],
		command[4],
		command[5],
		command[6]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				 &fis,
				 5,
				 0,
				 0,
				 0,
				 GFP_KERNEL,
				 to) < 0) {
		return -1;
	}

	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[4] = reply->cyl_low;
	command[5] = reply->cyl_hi;

	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
		__func__,
		command[0],
		command[1],
		command[4],
		command[5]);

	return 0;
}

/*
 * @brief Execute a drive command.
 *
 * @param port Pointer to the port data structure.
 * @param command Pointer to the user specified command parameters.
 * @param user_buffer Pointer to the user space buffer where read sector
 *                   data should be copied.
 *
 * return value 0 The command completed successfully.
 * return value -EFAULT An error occurred while copying the completion
 *                 data to the user space buffer.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_command(struct mtip_port *port, u8 *command,
				void __user *user_buffer)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *buf = NULL;
	dma_addr_t dma_addr = 0;
	int rv = 0, xfer_sz = command[3];
	unsigned int to;

	if (xfer_sz) {
		if (!user_buffer)
			return -EFAULT;

		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz,
				&dma_addr,
				GFP_KERNEL);
		if (!buf) {
			dev_err(&port->dd->pdev->dev,
				"Memory allocation failed (%d bytes)\n",
				ATA_SECT_SIZE * xfer_sz);
			return -ENOMEM;
		}
		memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[2];
	fis.sect_count	= command[3];
	if (fis.command == ATA_CMD_SMART) {
		fis.sector	= command[1];
		fis.cyl_low	= 0x4F;
		fis.cyl_hi	= 0xC2;
	}

	mtip_set_timeout(port->dd, &fis, &to, 0);

	if (xfer_sz)
		reply = (port->rxfis + RX_FIS_PIO_SETUP);
	else
		reply = (port->rxfis + RX_FIS_D2H_REG);

	dbg_printk(MTIP_DRV_NAME
		" %s: User Command: cmd %x, sect %x, "
		"feat %x, sectcnt %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				(xfer_sz ? dma_addr : 0),
				(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
				0,
				GFP_KERNEL,
				to)
				< 0) {
		rv = -EFAULT;
		goto exit_drive_command;
	}

	/* Collect the completion status. */
	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[2] = reply->sect_count;

	dbg_printk(MTIP_DRV_NAME
		" %s: Completion Status: stat %x, "
		"err %x, nsect %x\n",
		__func__,
		command[0],
		command[1],
		command[2]);

	if (xfer_sz) {
		if (copy_to_user(user_buffer,
				 buf,
				 ATA_SECT_SIZE * command[3])) {
			rv = -EFAULT;
			goto exit_drive_command;
		}
	}
exit_drive_command:
	if (buf)
		dmam_free_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
	return rv;
}

/*
 * Indicates whether a command has a single sector payload.
 *
 * @command  Command opcode sent to the device.
 * @features Features register value sent with the command.
 *
 * return value
 *	1	command is one that always has a single sector payload,
 *		regardless of the value in the Sector Count field.
 *	0	otherwise
 *
 */
1973 static unsigned int implicit_sector(unsigned char command,
1974 unsigned char features)
1975 {
1976 unsigned int rv = 0;
1977
1978 /* list of commands that have an implicit sector count of 1 */
1979 switch (command) {
1980 case ATA_CMD_SEC_SET_PASS:
1981 case ATA_CMD_SEC_UNLOCK:
1982 case ATA_CMD_SEC_ERASE_PREP:
1983 case ATA_CMD_SEC_ERASE_UNIT:
1984 case ATA_CMD_SEC_FREEZE_LOCK:
1985 case ATA_CMD_SEC_DISABLE_PASS:
1986 case ATA_CMD_PMP_READ:
1987 case ATA_CMD_PMP_WRITE:
1988 rv = 1;
1989 break;
1990 case ATA_CMD_SET_MAX:
1991 if (features == ATA_SET_MAX_UNLOCK)
1992 rv = 1;
1993 break;
1994 case ATA_CMD_SMART:
1995 if ((features == ATA_SMART_READ_VALUES) ||
1996 (features == ATA_SMART_READ_THRESHOLDS))
1997 rv = 1;
1998 break;
1999 case ATA_CMD_CONF_OVERLAY:
2000 if ((features == ATA_DCO_IDENTIFY) ||
2001 (features == ATA_DCO_SET))
2002 rv = 1;
2003 break;
2004 }
2005 return rv;
2006 }
2007
2008 /*
2009 * Executes a taskfile
2010 * See ide_taskfile_ioctl() for derivation
2011 */
2012 static int exec_drive_taskfile(struct driver_data *dd,
2013 void __user *buf,
2014 ide_task_request_t *req_task,
2015 int outtotal)
2016 {
2017 struct host_to_dev_fis fis;
2018 struct host_to_dev_fis *reply;
2019 u8 *outbuf = NULL;
2020 u8 *inbuf = NULL;
2021 dma_addr_t outbuf_dma = 0;
2022 dma_addr_t inbuf_dma = 0;
2023 dma_addr_t dma_buffer = 0;
2024 int err = 0;
2025 unsigned int taskin = 0;
2026 unsigned int taskout = 0;
2027 u8 nsect = 0;
2028 unsigned int timeout;
2029 unsigned int force_single_sector;
2030 unsigned int transfer_size;
2031 unsigned long task_file_data;
2032 int intotal = outtotal + req_task->out_size;
2033 int erasemode = 0;
2034
2035 taskout = req_task->out_size;
2036 taskin = req_task->in_size;
2037 /* 130560 = 512 * 0xFF, the largest in/out transfer allowed here */
2038 if (taskin > 130560 || taskout > 130560) {
2039 err = -EINVAL;
2040 goto abort;
2041 }
2042
2043 if (taskout) {
2044 outbuf = memdup_user(buf + outtotal, taskout);
2045 if (IS_ERR(outbuf)) {
2046 err = PTR_ERR(outbuf);
2047 outbuf = NULL;
2048 goto abort;
2049 }
2050 outbuf_dma = pci_map_single(dd->pdev,
2051 outbuf,
2052 taskout,
2053 DMA_TO_DEVICE);
2054 if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
2055 err = -ENOMEM;
2056 goto abort;
2057 }
2058 dma_buffer = outbuf_dma;
2059 }
2060
2061 if (taskin) {
2062 inbuf = memdup_user(buf + intotal, taskin);
2063 if (IS_ERR(inbuf)) {
2064 err = PTR_ERR(inbuf);
2065 inbuf = NULL;
2066 goto abort;
2067 }
2068 inbuf_dma = pci_map_single(dd->pdev,
2069 inbuf,
2070 taskin, DMA_FROM_DEVICE);
2071 if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
2072 err = -ENOMEM;
2073 goto abort;
2074 }
2075 dma_buffer = inbuf_dma;
2076 }
2077
2078 /* Only PIO and non-data commands are supported by this ioctl. */
2079 switch (req_task->data_phase) {
2080 case TASKFILE_OUT:
2081 nsect = taskout / ATA_SECT_SIZE;
2082 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2083 break;
2084 case TASKFILE_IN:
2085 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2086 break;
2087 case TASKFILE_NO_DATA:
2088 reply = (dd->port->rxfis + RX_FIS_D2H_REG);
2089 break;
2090 default:
2091 err = -EINVAL;
2092 goto abort;
2093 }
2094
2095 /* Build the FIS. */
2096 memset(&fis, 0, sizeof(struct host_to_dev_fis));
2097
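/*
 * 0x27 is the Register Host-to-Device FIS type defined by the SATA
 * spec; bit 7 of the options byte is the C bit, marking this FIS as
 * carrying a new command rather than a device control update.
 */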
2098 fis.type = 0x27;
2099 fis.opts = 1 << 7;
2100 fis.command = req_task->io_ports[7];
2101 fis.features = req_task->io_ports[1];
2102 fis.sect_count = req_task->io_ports[2];
2103 fis.lba_low = req_task->io_ports[3];
2104 fis.lba_mid = req_task->io_ports[4];
2105 fis.lba_hi = req_task->io_ports[5];
2106 /* Clear the dev bit*/
2107 fis.device = req_task->io_ports[6] & ~0x10;
2108
2109 if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
2110 req_task->in_flags.all =
2111 IDE_TASKFILE_STD_IN_FLAGS |
2112 (IDE_HOB_STD_IN_FLAGS << 8);
2113 fis.lba_low_ex = req_task->hob_ports[3];
2114 fis.lba_mid_ex = req_task->hob_ports[4];
2115 fis.lba_hi_ex = req_task->hob_ports[5];
2116 fis.features_ex = req_task->hob_ports[1];
2117 fis.sect_cnt_ex = req_task->hob_ports[2];
2118
2119 } else {
2120 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
2121 }
2122
2123 force_single_sector = implicit_sector(fis.command, fis.features);
2124
2125 if ((taskin || taskout) && (!fis.sect_count)) {
2126 if (nsect)
2127 fis.sect_count = nsect;
2128 else {
2129 if (!force_single_sector) {
2130 dev_warn(&dd->pdev->dev,
2131 "data movement but "
2132 "sect_count is 0\n");
2133 err = -EINVAL;
2134 goto abort;
2135 }
2136 }
2137 }
2138
2139 dbg_printk(MTIP_DRV_NAME
2140 " %s: cmd %x, feat %x, nsect %x,"
2141 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
2142 " head/dev %x\n",
2143 __func__,
2144 fis.command,
2145 fis.features,
2146 fis.sect_count,
2147 fis.lba_low,
2148 fis.lba_mid,
2149 fis.lba_hi,
2150 fis.device);
2151
2152 /* check for erase mode support during secure erase.*/
2153 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
2154 (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
2155 erasemode = 1;
2156 }
2157
2158 mtip_set_timeout(dd, &fis, &timeout, erasemode);
2159
2160 /* Determine the correct transfer size.*/
2161 if (force_single_sector)
2162 transfer_size = ATA_SECT_SIZE;
2163 else
2164 transfer_size = ATA_SECT_SIZE * fis.sect_count;
2165
2166 /* Execute the command.*/
2167 if (mtip_exec_internal_command(dd->port,
2168 &fis,
2169 5,
2170 dma_buffer,
2171 transfer_size,
2172 0,
2173 GFP_KERNEL,
2174 timeout) < 0) {
2175 err = -EIO;
2176 goto abort;
2177 }
2178
2179 task_file_data = readl(dd->port->mmio+PORT_TFDATA);
2180
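/*
 * Bit 0 of the taskfile data register is the ATA ERR bit. On a clean
 * PIO-in completion the status is taken from the PIO Setup FIS,
 * otherwise from the D2H Register FIS.
 */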
2181 if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
2182 reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
2183 req_task->io_ports[7] = reply->control;
2184 } else {
2185 reply = dd->port->rxfis + RX_FIS_D2H_REG;
2186 req_task->io_ports[7] = reply->command;
2187 }
2188
2189 /* reclaim the DMA buffers.*/
2190 if (inbuf_dma)
2191 pci_unmap_single(dd->pdev, inbuf_dma,
2192 taskin, DMA_FROM_DEVICE);
2193 if (outbuf_dma)
2194 pci_unmap_single(dd->pdev, outbuf_dma,
2195 taskout, DMA_TO_DEVICE);
2196 inbuf_dma = 0;
2197 outbuf_dma = 0;
2198
2199 /* return the ATA registers to the caller.*/
2200 req_task->io_ports[1] = reply->features;
2201 req_task->io_ports[2] = reply->sect_count;
2202 req_task->io_ports[3] = reply->lba_low;
2203 req_task->io_ports[4] = reply->lba_mid;
2204 req_task->io_ports[5] = reply->lba_hi;
2205 req_task->io_ports[6] = reply->device;
2206
2207 if (req_task->out_flags.all & 1) {
2208
2209 req_task->hob_ports[3] = reply->lba_low_ex;
2210 req_task->hob_ports[4] = reply->lba_mid_ex;
2211 req_task->hob_ports[5] = reply->lba_hi_ex;
2212 req_task->hob_ports[1] = reply->features_ex;
2213 req_task->hob_ports[2] = reply->sect_cnt_ex;
2214 }
2215 dbg_printk(MTIP_DRV_NAME
2216 " %s: Completion: stat %x,"
2217 "err %x, sect_cnt %x, lbalo %x,"
2218 "lbamid %x, lbahi %x, dev %x\n",
2219 __func__,
2220 req_task->io_ports[7],
2221 req_task->io_ports[1],
2222 req_task->io_ports[2],
2223 req_task->io_ports[3],
2224 req_task->io_ports[4],
2225 req_task->io_ports[5],
2226 req_task->io_ports[6]);
2227
2228 if (taskout) {
2229 if (copy_to_user(buf + outtotal, outbuf, taskout)) {
2230 err = -EFAULT;
2231 goto abort;
2232 }
2233 }
2234 if (taskin) {
2235 if (copy_to_user(buf + intotal, inbuf, taskin)) {
2236 err = -EFAULT;
2237 goto abort;
2238 }
2239 }
2240 abort:
2241 if (inbuf_dma)
2242 pci_unmap_single(dd->pdev, inbuf_dma,
2243 taskin, DMA_FROM_DEVICE);
2244 if (outbuf_dma)
2245 pci_unmap_single(dd->pdev, outbuf_dma,
2246 taskout, DMA_TO_DEVICE);
2247 kfree(outbuf);
2248 kfree(inbuf);
2249
2250 return err;
2251 }
2252
2253 /*
2254 * Handle IOCTL calls from the Block Layer.
2255 *
2256 * This function is called by the Block Layer when it receives an IOCTL
2257 * command that it does not understand. If the IOCTL command is not
2258 * supported, this function returns -EINVAL.
2259 *
2260 * @dd Pointer to the driver data structure.
2261 * @cmd IOCTL command passed from the Block Layer.
2262 * @arg IOCTL argument passed from the Block Layer.
2263 *
2264 * return value
2265 * 0 The IOCTL completed successfully.
2266 * -EINVAL The specified command is not supported.
2267 * -EFAULT An error occurred copying data to a user space buffer.
2268 * -EIO An error occurred while executing the command.
2269 */
2270 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2271 unsigned long arg)
2272 {
2273 switch (cmd) {
2274 case HDIO_GET_IDENTITY:
2275 {
2276 if (copy_to_user((void __user *)arg, dd->port->identify,
2277 sizeof(u16) * ATA_ID_WORDS))
2278 return -EFAULT;
2279 break;
2280 }
2281 case HDIO_DRIVE_CMD:
2282 {
2283 u8 drive_command[4];
2284
2285 /* Copy the user command info to our buffer. */
2286 if (copy_from_user(drive_command,
2287 (void __user *) arg,
2288 sizeof(drive_command)))
2289 return -EFAULT;
2290
2291 /* Execute the drive command. */
2292 if (exec_drive_command(dd->port,
2293 drive_command,
2294 (void __user *) (arg+4)))
2295 return -EIO;
2296
2297 /* Copy the status back to the user's buffer. */
2298 if (copy_to_user((void __user *) arg,
2299 drive_command,
2300 sizeof(drive_command)))
2301 return -EFAULT;
2302
2303 break;
2304 }
2305 case HDIO_DRIVE_TASK:
2306 {
2307 u8 drive_command[7];
2308
2309 /* Copy the user command info to our buffer. */
2310 if (copy_from_user(drive_command,
2311 (void __user *) arg,
2312 sizeof(drive_command)))
2313 return -EFAULT;
2314
2315 /* Execute the drive command. */
2316 if (exec_drive_task(dd->port, drive_command))
2317 return -EIO;
2318
2319 /* Copy the status back to the user's buffer. */
2320 if (copy_to_user((void __user *) arg,
2321 drive_command,
2322 sizeof(drive_command)))
2323 return -EFAULT;
2324
2325 break;
2326 }
2327 case HDIO_DRIVE_TASKFILE: {
2328 ide_task_request_t req_task;
2329 int ret, outtotal;
2330
2331 if (copy_from_user(&req_task, (void __user *) arg,
2332 sizeof(req_task)))
2333 return -EFAULT;
2334
2335 outtotal = sizeof(req_task);
2336
2337 ret = exec_drive_taskfile(dd, (void __user *) arg,
2338 &req_task, outtotal);
2339
2340 if (copy_to_user((void __user *) arg, &req_task,
2341 sizeof(req_task)))
2342 return -EFAULT;
2343
2344 return ret;
2345 }
2346
2347 default:
2348 return -EINVAL;
2349 }
2350 return 0;
2351 }
2352
2353 /*
2354 * Submit an IO to the hw
2355 *
2356 * This function is called by the block layer to issue an io
2357 * to the device. Upon completion, the completion function
2358 * (mtip_async_complete) is called with the driver data
2359 * passed as the callback data.
2360 *
2361 * @dd Pointer to the driver data structure.
2362 * @rq Pointer to the request to issue; the start sector,
2363 * sector count and direction are derived from it.
2364 * @command Pointer to the command structure for this
2365 * request.
2366 * @nents Number of entries in the scatter list for
2367 * this request.
2368 * @hctx Pointer to the hardware queue context for the
2369 * queue this request was taken from.
2370 *
2371 * return value
2372 * None
2373 */
2374 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
2375 struct mtip_cmd *command, int nents,
2376 struct blk_mq_hw_ctx *hctx)
2377 {
2378 struct host_to_dev_fis *fis;
2379 struct mtip_port *port = dd->port;
2380 int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2381 u64 start = blk_rq_pos(rq);
2382 unsigned int nsect = blk_rq_sectors(rq);
2383
2384 /* Map the scatter list for DMA access */
2385 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2386
2387 prefetch(&port->flags);
2388
2389 command->scatter_ents = nents;
2390
2391 /*
2392 * The number of retries for this command before it is
2393 * reported as a failure to the upper layers.
2394 */
2395 command->retries = MTIP_MAX_RETRIES;
2396
2397 /* Fill out fis */
2398 fis = command->command;
2399 fis->type = 0x27;
2400 fis->opts = 1 << 7;
2401 if (dma_dir == DMA_FROM_DEVICE)
2402 fis->command = ATA_CMD_FPDMA_READ;
2403 else
2404 fis->command = ATA_CMD_FPDMA_WRITE;
2405 fis->lba_low = start & 0xFF;
2406 fis->lba_mid = (start >> 8) & 0xFF;
2407 fis->lba_hi = (start >> 16) & 0xFF;
2408 fis->lba_low_ex = (start >> 24) & 0xFF;
2409 fis->lba_mid_ex = (start >> 32) & 0xFF;
2410 fis->lba_hi_ex = (start >> 40) & 0xFF;
2411 fis->device = 1 << 6;
2412 fis->features = nsect & 0xFF;
2413 fis->features_ex = (nsect >> 8) & 0xFF;
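/*
 * For NCQ commands the tag is carried in bits 7:3 of the Sector
 * Count field. The (rq->tag >> 5) term folds the slot-group index
 * of tags above 31 into the low bits; this is assumed to be a
 * device-specific extension of the 5-bit NCQ tag.
 */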
2414 fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
2415 fis->sect_cnt_ex = 0;
2416 fis->control = 0;
2417 fis->res2 = 0;
2418 fis->res3 = 0;
2419 fill_command_sg(dd, command, nents);
2420
2421 if (unlikely(command->unaligned))
2422 fis->device |= 1 << 7;
2423
2424 /* Populate the command header */
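/*
 * Per the AHCI command header layout: PRDT entry count in bits
 * 31:16, command FIS length (5 dwords for a 20-byte H2D Register
 * FIS) in bits 4:0, plus the prefetch hint.
 */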
2425 command->command_header->opts =
2426 __force_bit2int cpu_to_le32(
2427 (nents << 16) | 5 | AHCI_CMD_PREFETCH);
2428 command->command_header->byte_count = 0;
2429
2430 /*
2431 * Set the completion function and data for the command
2432 * within this layer.
2433 */
2434 command->comp_data = dd;
2435 command->comp_func = mtip_async_complete;
2436 command->direction = dma_dir;
2437
2438 /*
2439 * Hold the command back (queued for the service thread)
2440 * if an internal command is in progress or error handling is active.
2441 */
2442 if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
2443 set_bit(rq->tag, port->cmds_to_issue);
2444 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2445 return;
2446 }
2447
2448 /* Issue the command to the hardware */
2449 mtip_issue_ncq_command(port, rq->tag);
2450 }
2451
2452 /*
2453 * Sysfs status dump.
2454 *
2455 * @dev Pointer to the device structure, passed by the kernel.
2456 * @attr Pointer to the device_attribute structure passed by the kernel.
2457 * @buf Pointer to the char buffer that will receive the stats info.
2458 *
2459 * return value
2460 * The size, in bytes, of the data copied into buf.
2461 */
2462 static ssize_t mtip_hw_show_status(struct device *dev,
2463 struct device_attribute *attr,
2464 char *buf)
2465 {
2466 struct driver_data *dd = dev_to_disk(dev)->private_data;
2467 int size = 0;
2468
2469 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2470 size += sprintf(buf, "%s", "thermal_shutdown\n");
2471 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2472 size += sprintf(buf, "%s", "write_protect\n");
2473 else
2474 size += sprintf(buf, "%s", "online\n");
2475
2476 return size;
2477 }
2478
2479 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2480
2481 /* debugfs entries */
2482
2483 static ssize_t show_device_status(struct device_driver *drv, char *buf)
2484 {
2485 int size = 0;
2486 struct driver_data *dd, *tmp;
2487 unsigned long flags;
2488 char id_buf[42];
2489 u16 status = 0;
2490
2491 spin_lock_irqsave(&dev_lock, flags);
2492 size += sprintf(&buf[size], "Devices Present:\n");
2493 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2494 if (dd->pdev) {
2495 if (dd->port &&
2496 dd->port->identify &&
2497 dd->port->identify_valid) {
2498 strlcpy(id_buf,
2499 (char *) (dd->port->identify + 10), 21);
2500 status = *(dd->port->identify + 141);
2501 } else {
2502 memset(id_buf, 0, 42);
2503 status = 0;
2504 }
2505
2506 if (dd->port &&
2507 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2508 size += sprintf(&buf[size],
2509 " device %s %s (ftl rebuild %d %%)\n",
2510 dev_name(&dd->pdev->dev),
2511 id_buf,
2512 status);
2513 } else {
2514 size += sprintf(&buf[size],
2515 " device %s %s\n",
2516 dev_name(&dd->pdev->dev),
2517 id_buf);
2518 }
2519 }
2520 }
2521
2522 size += sprintf(&buf[size], "Devices Being Removed:\n");
2523 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2524 if (dd->pdev) {
2525 if (dd->port &&
2526 dd->port->identify &&
2527 dd->port->identify_valid) {
2528 strlcpy(id_buf,
2529 (char *) (dd->port->identify+10), 21);
2530 status = *(dd->port->identify + 141);
2531 } else {
2532 memset(id_buf, 0, 42);
2533 status = 0;
2534 }
2535
2536 if (dd->port &&
2537 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2538 size += sprintf(&buf[size],
2539 " device %s %s (ftl rebuild %d %%)\n",
2540 dev_name(&dd->pdev->dev),
2541 id_buf,
2542 status);
2543 } else {
2544 size += sprintf(&buf[size],
2545 " device %s %s\n",
2546 dev_name(&dd->pdev->dev),
2547 id_buf);
2548 }
2549 }
2550 }
2551 spin_unlock_irqrestore(&dev_lock, flags);
2552
2553 return size;
2554 }
2555
2556 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
2557 size_t len, loff_t *offset)
2558 {
2559 struct driver_data *dd = (struct driver_data *)f->private_data;
2560 int size = *offset;
2561 char *buf;
2562 int rv = 0;
2563
2564 if (!len || *offset)
2565 return 0;
2566
2567 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2568 if (!buf) {
2569 dev_err(&dd->pdev->dev,
2570 "Memory allocation: status buffer\n");
2571 return -ENOMEM;
2572 }
2573
2574 size += show_device_status(NULL, buf);
2575
2576 *offset = size <= len ? size : len;
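/* copy_to_user() returns the number of bytes it could not copy */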
2577 size = copy_to_user(ubuf, buf, *offset);
2578 if (size)
2579 rv = -EFAULT;
2580
2581 kfree(buf);
2582 return rv ? rv : *offset;
2583 }
2584
2585 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2586 size_t len, loff_t *offset)
2587 {
2588 struct driver_data *dd = (struct driver_data *)f->private_data;
2589 char *buf;
2590 u32 group_allocated;
2591 int size = *offset;
2592 int n, rv = 0;
2593
2594 if (!len || size)
2595 return 0;
2596
2597 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2598 if (!buf) {
2599 dev_err(&dd->pdev->dev,
2600 "Memory allocation: register buffer\n");
2601 return -ENOMEM;
2602 }
2603
2604 size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
2605
2606 for (n = dd->slot_groups-1; n >= 0; n--)
2607 size += sprintf(&buf[size], "%08X ",
2608 readl(dd->port->s_active[n]));
2609
2610 size += sprintf(&buf[size], "]\n");
2611 size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
2612
2613 for (n = dd->slot_groups-1; n >= 0; n--)
2614 size += sprintf(&buf[size], "%08X ",
2615 readl(dd->port->cmd_issue[n]));
2616
2617 size += sprintf(&buf[size], "]\n");
2618 size += sprintf(&buf[size], "H/ Completed : [ 0x");
2619
2620 for (n = dd->slot_groups-1; n >= 0; n--)
2621 size += sprintf(&buf[size], "%08X ",
2622 readl(dd->port->completed[n]));
2623
2624 size += sprintf(&buf[size], "]\n");
2625 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
2626 readl(dd->port->mmio + PORT_IRQ_STAT));
2627 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
2628 readl(dd->mmio + HOST_IRQ_STAT));
2629 size += sprintf(&buf[size], "\n");
2630
2631 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
2632
2633 for (n = dd->slot_groups-1; n >= 0; n--) {
2634 if (sizeof(long) > sizeof(u32))
2635 group_allocated =
2636 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2637 else
2638 group_allocated = dd->port->cmds_to_issue[n];
2639 size += sprintf(&buf[size], "%08X ", group_allocated);
2640 }
2641 size += sprintf(&buf[size], "]\n");
2642
2643 *offset = size <= len ? size : len;
2644 size = copy_to_user(ubuf, buf, *offset);
2645 if (size)
2646 rv = -EFAULT;
2647
2648 kfree(buf);
2649 return rv ? rv : *offset;
2650 }
2651
2652 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2653 size_t len, loff_t *offset)
2654 {
2655 struct driver_data *dd = (struct driver_data *)f->private_data;
2656 char *buf;
2657 int size = *offset;
2658 int rv = 0;
2659
2660 if (!len || size)
2661 return 0;
2662
2663 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2664 if (!buf) {
2665 dev_err(&dd->pdev->dev,
2666 "Memory allocation: flag buffer\n");
2667 return -ENOMEM;
2668 }
2669
2670 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
2671 dd->port->flags);
2672 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
2673 dd->dd_flag);
2674
2675 *offset = size <= len ? size : len;
2676 size = copy_to_user(ubuf, buf, *offset);
2677 if (size)
2678 rv = -EFAULT;
2679
2680 kfree(buf);
2681 return rv ? rv : *offset;
2682 }
2683
2684 static const struct file_operations mtip_device_status_fops = {
2685 .owner = THIS_MODULE,
2686 .open = simple_open,
2687 .read = mtip_hw_read_device_status,
2688 .llseek = no_llseek,
2689 };
2690
2691 static const struct file_operations mtip_regs_fops = {
2692 .owner = THIS_MODULE,
2693 .open = simple_open,
2694 .read = mtip_hw_read_registers,
2695 .llseek = no_llseek,
2696 };
2697
2698 static const struct file_operations mtip_flags_fops = {
2699 .owner = THIS_MODULE,
2700 .open = simple_open,
2701 .read = mtip_hw_read_flags,
2702 .llseek = no_llseek,
2703 };
2704
2705 /*
2706 * Create the sysfs related attributes.
2707 *
2708 * @dd Pointer to the driver data structure.
2709 * @kobj Pointer to the kobj for the block device.
2710 *
2711 * return value
2712 * 0 Operation completed successfully.
2713 * -EINVAL Invalid parameter.
2714 */
2715 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2716 {
2717 if (!kobj || !dd)
2718 return -EINVAL;
2719
2720 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2721 dev_warn(&dd->pdev->dev,
2722 "Error creating 'status' sysfs entry\n");
2723 return 0;
2724 }
2725
2726 /*
2727 * Remove the sysfs related attributes.
2728 *
2729 * @dd Pointer to the driver data structure.
2730 * @kobj Pointer to the kobj for the block device.
2731 *
2732 * return value
2733 * 0 Operation completed successfully.
2734 * -EINVAL Invalid parameter.
2735 */
2736 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2737 {
2738 if (!kobj || !dd)
2739 return -EINVAL;
2740
2741 sysfs_remove_file(kobj, &dev_attr_status.attr);
2742
2743 return 0;
2744 }
2745
2746 static int mtip_hw_debugfs_init(struct driver_data *dd)
2747 {
2748 if (!dfs_parent)
2749 return -1;
2750
2751 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
2752 if (IS_ERR_OR_NULL(dd->dfs_node)) {
2753 dev_warn(&dd->pdev->dev,
2754 "Error creating node %s under debugfs\n",
2755 dd->disk->disk_name);
2756 dd->dfs_node = NULL;
2757 return -1;
2758 }
2759
2760 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
2761 &mtip_flags_fops);
2762 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
2763 &mtip_regs_fops);
2764
2765 return 0;
2766 }
2767
2768 static void mtip_hw_debugfs_exit(struct driver_data *dd)
2769 {
2770 if (dd->dfs_node)
2771 debugfs_remove_recursive(dd->dfs_node);
2772 }
2773
2774 /*
2775 * Perform any init/resume time hardware setup
2776 *
2777 * @dd Pointer to the driver data structure.
2778 *
2779 * return value
2780 * None
2781 */
2782 static inline void hba_setup(struct driver_data *dd)
2783 {
2784 u32 hwdata;
2785 hwdata = readl(dd->mmio + HOST_HSORG);
2786
2787 /* interrupt bug workaround: use only 1 IS bit.*/
2788 writel(hwdata |
2789 HSORG_DISABLE_SLOTGRP_INTR |
2790 HSORG_DISABLE_SLOTGRP_PXIS,
2791 dd->mmio + HOST_HSORG);
2792 }
2793
2794 static int mtip_device_unaligned_constrained(struct driver_data *dd)
2795 {
2796 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
2797 }
2798
2799 /*
2800 * Detect the details of the product, and store anything needed
2801 * into the driver data structure. This includes product type and
2802 * version and number of slot groups.
2803 *
2804 * @dd Pointer to the driver data structure.
2805 *
2806 * return value
2807 * None
2808 */
2809 static void mtip_detect_product(struct driver_data *dd)
2810 {
2811 u32 hwdata;
2812 unsigned int rev, slotgroups;
2813
2814 /*
2815 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
2816 * info register:
2817 * [15:8] hardware/software interface rev#
2818 * [ 3] asic-style interface
2819 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
2820 */
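/*
 * Illustrative decode: hwdata == 0x0109 would mean HS rev 0x01,
 * ASIC-style interface, and (1 & 0x7) + 1 = 2 slot groups, i.e.
 * 64 command slots.
 */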
2821 hwdata = readl(dd->mmio + HOST_HSORG);
2822
2823 dd->product_type = MTIP_PRODUCT_UNKNOWN;
2824 dd->slot_groups = 1;
2825
2826 if (hwdata & 0x8) {
2827 dd->product_type = MTIP_PRODUCT_ASICFPGA;
2828 rev = (hwdata & HSORG_HWREV) >> 8;
2829 slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
2830 dev_info(&dd->pdev->dev,
2831 "ASIC-FPGA design, HS rev 0x%x, "
2832 "%i slot groups [%i slots]\n",
2833 rev,
2834 slotgroups,
2835 slotgroups * 32);
2836
2837 if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
2838 dev_warn(&dd->pdev->dev,
2839 "Warning: driver only supports "
2840 "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
2841 slotgroups = MTIP_MAX_SLOT_GROUPS;
2842 }
2843 dd->slot_groups = slotgroups;
2844 return;
2845 }
2846
2847 dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
2848 }
2849
2850 /*
2851 * Blocking wait for FTL rebuild to complete
2852 *
2853 * @dd Pointer to the DRIVER_DATA structure.
2854 *
2855 * return value
2856 * 0 FTL rebuild completed successfully
2857 * -EFAULT FTL rebuild error/timeout/interruption
2858 */
2859 static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2860 {
2861 unsigned long timeout, cnt = 0, start;
2862
2863 dev_warn(&dd->pdev->dev,
2864 "FTL rebuild in progress. Polling for completion.\n");
2865
2866 start = jiffies;
2867 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
2868
2869 do {
2870 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2871 &dd->dd_flag)))
2872 return -EFAULT;
2873 if (mtip_check_surprise_removal(dd->pdev))
2874 return -EFAULT;
2875
2876 if (mtip_get_identify(dd->port, NULL) < 0)
2877 return -EFAULT;
2878
2879 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2880 MTIP_FTL_REBUILD_MAGIC) {
2881 ssleep(1);
2882 /* Print message every 3 minutes */
2883 if (cnt++ >= 180) {
2884 dev_warn(&dd->pdev->dev,
2885 "FTL rebuild in progress (%d secs).\n",
2886 jiffies_to_msecs(jiffies - start) / 1000);
2887 cnt = 0;
2888 }
2889 } else {
2890 dev_warn(&dd->pdev->dev,
2891 "FTL rebuild complete (%d secs).\n",
2892 jiffies_to_msecs(jiffies - start) / 1000);
2893 mtip_block_initialize(dd);
2894 return 0;
2895 }
2896 } while (time_before(jiffies, timeout));
2897
2898 /* Check for timeout */
2899 dev_err(&dd->pdev->dev,
2900 "Timed out waiting for FTL rebuild to complete (%d secs).\n",
2901 jiffies_to_msecs(jiffies - start) / 1000);
2902 return -EFAULT;
2903 }
2904
2905 static void mtip_softirq_done_fn(struct request *rq)
2906 {
2907 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
2908 struct driver_data *dd = rq->q->queuedata;
2909
2910 /* Unmap the DMA scatter list entries */
2911 dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
2912 cmd->direction);
2913
2914 if (unlikely(cmd->unaligned))
2915 up(&dd->port->cmd_slot_unal);
2916
2917 blk_mq_end_request(rq, rq->errors);
2918 }
2919
2920 static void mtip_abort_cmd(struct request *req, void *data,
2921 bool reserved)
2922 {
2923 struct driver_data *dd = data;
2924
2925 dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
2926
2927 clear_bit(req->tag, dd->port->cmds_to_issue);
2928 req->errors = -EIO;
2929 mtip_softirq_done_fn(req);
2930 }
2931
2932 static void mtip_queue_cmd(struct request *req, void *data,
2933 bool reserved)
2934 {
2935 struct driver_data *dd = data;
2936
2937 set_bit(req->tag, dd->port->cmds_to_issue);
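/*
 * blk_abort_request() forces the block layer timeout handling to
 * run for this request immediately, rather than waiting for the
 * normal timeout to expire.
 */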
2938 blk_abort_request(req);
2939 }
2940
2941 /*
2942 * service thread to issue queued commands
2943 *
2944 * @data Pointer to the driver data structure.
2945 *
2946 * return value
2947 * 0
2948 */
2949
2950 static int mtip_service_thread(void *data)
2951 {
2952 struct driver_data *dd = (struct driver_data *)data;
2953 unsigned long slot, slot_start, slot_wrap, to;
2954 unsigned int num_cmd_slots = dd->slot_groups * 32;
2955 struct mtip_port *port = dd->port;
2956
2957 while (1) {
2958 if (kthread_should_stop() ||
2959 test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2960 goto st_out;
2961 clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2962
2963 /*
2964 * the condition checks that neither an internal command
2965 * is in progress nor error handling is active
2966 */
2967 wait_event_interruptible(port->svc_wait, (port->flags) &&
2968 (port->flags & MTIP_PF_SVC_THD_WORK));
2969
2970 if (kthread_should_stop() ||
2971 test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2972 goto st_out;
2973
2974 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2975 &dd->dd_flag)))
2976 goto st_out;
2977
2978 set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2979
2980 restart_eh:
2981 /* Demux bits: start with error handling */
2982 if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
2983 mtip_handle_tfe(dd);
2984 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
2985 }
2986
2987 if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
2988 goto restart_eh;
2989
2990 if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
2991 to = jiffies + msecs_to_jiffies(5000);
2992
2993 do {
2994 mdelay(100);
2995 } while (atomic_read(&dd->irq_workers_active) != 0 &&
2996 time_before(jiffies, to));
2997
2998 if (atomic_read(&dd->irq_workers_active) != 0)
2999 dev_warn(&dd->pdev->dev,
3000 "Completion workers still active!");
3001
3002 spin_lock(dd->queue->queue_lock);
3003 blk_mq_tagset_busy_iter(&dd->tags,
3004 mtip_queue_cmd, dd);
3005 spin_unlock(dd->queue->queue_lock);
3006
3007 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
3008
3009 if (mtip_device_reset(dd))
3010 blk_mq_tagset_busy_iter(&dd->tags,
3011 mtip_abort_cmd, dd);
3012
3013 clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
3014 }
3015
3016 if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
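/*
 * Slot 0 is reserved for internal commands (MTIP_TAG_INTERNAL),
 * so the wrap-around scan below starts at slot 1.
 */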
3017 slot = 1;
3018 /* used to restrict the loop to one iteration */
3019 slot_start = num_cmd_slots;
3020 slot_wrap = 0;
3021 while (1) {
3022 slot = find_next_bit(port->cmds_to_issue,
3023 num_cmd_slots, slot);
3024 if (slot_wrap == 1) {
3025 if ((slot_start >= slot) ||
3026 (slot >= num_cmd_slots))
3027 break;
3028 }
3029 if (unlikely(slot_start == num_cmd_slots))
3030 slot_start = slot;
3031
3032 if (unlikely(slot == num_cmd_slots)) {
3033 slot = 1;
3034 slot_wrap = 1;
3035 continue;
3036 }
3037
3038 /* Issue the command to the hardware */
3039 mtip_issue_ncq_command(port, slot);
3040
3041 clear_bit(slot, port->cmds_to_issue);
3042 }
3043
3044 clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
3045 }
3046
3047 if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
3048 if (mtip_ftl_rebuild_poll(dd) == 0)
3049 clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
3050 }
3051 }
3052
3053 st_out:
3054 return 0;
3055 }
3056
3057 /*
3058 * DMA region teardown
3059 *
3060 * @dd Pointer to driver_data structure
3061 *
3062 * return value
3063 * None
3064 */
3065 static void mtip_dma_free(struct driver_data *dd)
3066 {
3067 struct mtip_port *port = dd->port;
3068
3069 if (port->block1)
3070 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3071 port->block1, port->block1_dma);
3072
3073 if (port->command_list) {
3074 dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
3075 port->command_list, port->command_list_dma);
3076 }
3077 }
3078
3079 /*
3080 * DMA region setup
3081 *
3082 * @dd Pointer to driver_data structure
3083 *
3084 * return value
3085 * -ENOMEM Not enough free DMA region space to initialize driver
3086 */
3087 static int mtip_dma_alloc(struct driver_data *dd)
3088 {
3089 struct mtip_port *port = dd->port;
3090
3091 /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
3092 port->block1 =
3093 dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3094 &port->block1_dma, GFP_KERNEL);
3095 if (!port->block1)
3096 return -ENOMEM;
3097 memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
3098
3099 /* Allocate dma memory for command list */
3100 port->command_list =
3101 dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
3102 &port->command_list_dma, GFP_KERNEL);
3103 if (!port->command_list) {
3104 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3105 port->block1, port->block1_dma);
3106 port->block1 = NULL;
3107 port->block1_dma = 0;
3108 return -ENOMEM;
3109 }
3110 memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
3111
3112 /* Setup all pointers into first DMA region */
3113 port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
3114 port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
3115 port->identify = port->block1 + AHCI_IDFY_OFFSET;
3116 port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
3117 port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
3118 port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
3119 port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
3120 port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
3121
3122 return 0;
3123 }
3124
3125 static int mtip_hw_get_identify(struct driver_data *dd)
3126 {
3127 struct smart_attr attr242;
3128 unsigned char *buf;
3129 int rv;
3130
3131 if (mtip_get_identify(dd->port, NULL) < 0)
3132 return -EFAULT;
3133
3134 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
3135 MTIP_FTL_REBUILD_MAGIC) {
3136 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
3137 return MTIP_FTL_REBUILD_MAGIC;
3138 }
3139 mtip_dump_identify(dd->port);
3140
3141 /* check write protect, over temp and rebuild statuses */
3142 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
3143 dd->port->log_buf,
3144 dd->port->log_buf_dma, 1);
3145 if (rv) {
3146 dev_warn(&dd->pdev->dev,
3147 "Error in READ LOG EXT (10h) command\n");
3148 /* non-critical error, don't fail the load */
3149 } else {
3150 buf = (unsigned char *)dd->port->log_buf;
3151 if (buf[259] & 0x1) {
3152 dev_info(&dd->pdev->dev,
3153 "Write protect bit is set.\n");
3154 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
3155 }
3156 if (buf[288] == 0xF7) {
3157 dev_info(&dd->pdev->dev,
3158 "Exceeded Tmax, drive in thermal shutdown.\n");
3159 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
3160 }
3161 if (buf[288] == 0xBF) {
3162 dev_info(&dd->pdev->dev,
3163 "Drive indicates rebuild has failed.\n");
3164 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
3165 }
3166 }
3167
3168 /* get write protect progress */
3169 memset(&attr242, 0, sizeof(struct smart_attr));
3170 if (mtip_get_smart_attr(dd->port, 242, &attr242))
3171 dev_warn(&dd->pdev->dev,
3172 "Unable to check write protect progress\n");
3173 else
3174 dev_info(&dd->pdev->dev,
3175 "Write protect progress: %u%% (%u blocks)\n",
3176 attr242.cur, le32_to_cpu(attr242.data));
3177
3178 return rv;
3179 }
3180
3181 /*
3182 * Called once for each card.
3183 *
3184 * @dd Pointer to the driver data structure.
3185 *
3186 * return value
3187 * 0 on success, else an error code.
3188 */
3189 static int mtip_hw_init(struct driver_data *dd)
3190 {
3191 int i;
3192 int rv;
3193 unsigned int num_command_slots;
3194 unsigned long timeout, timetaken;
3195
3196 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
3197
3198 mtip_detect_product(dd);
3199 if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
3200 rv = -EIO;
3201 goto out1;
3202 }
3203 num_command_slots = dd->slot_groups * 32;
3204
3205 hba_setup(dd);
3206
3207 dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
3208 dd->numa_node);
3209 if (!dd->port) {
3210 dev_err(&dd->pdev->dev,
3211 "Memory allocation: port structure\n");
3212 return -ENOMEM;
3213 }
3214
3215 /* Continue workqueue setup */
3216 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
3217 dd->work[i].port = dd->port;
3218
3219 /* Enable unaligned IO constraints for some devices */
3220 if (mtip_device_unaligned_constrained(dd))
3221 dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
3222 else
3223 dd->unal_qdepth = 0;
3224
3225 sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
3226
3227 /* Spinlock to prevent concurrent issue */
3228 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
3229 spin_lock_init(&dd->port->cmd_issue_lock[i]);
3230
3231 /* Set the port mmio base address. */
3232 dd->port->mmio = dd->mmio + PORT_OFFSET;
3233 dd->port->dd = dd;
3234
3235 /* DMA allocations */
3236 rv = mtip_dma_alloc(dd);
3237 if (rv < 0)
3238 goto out1;
3239
3240 /* Setup the pointers to the extended s_active and CI registers. */
3241 for (i = 0; i < dd->slot_groups; i++) {
3242 dd->port->s_active[i] =
3243 dd->port->mmio + i*0x80 + PORT_SCR_ACT;
3244 dd->port->cmd_issue[i] =
3245 dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
3246 dd->port->completed[i] =
3247 dd->port->mmio + i*0x80 + PORT_SDBV;
3248 }
3249
3250 timetaken = jiffies;
3251 timeout = jiffies + msecs_to_jiffies(30000);
3252 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
3253 time_before(jiffies, timeout)) {
3254 mdelay(100);
3255 }
3256 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
3257 timetaken = jiffies - timetaken;
3258 dev_warn(&dd->pdev->dev,
3259 "Surprise removal detected at %u ms\n",
3260 jiffies_to_msecs(timetaken));
3261 rv = -ENODEV;
3262 goto out2;
3263 }
3264 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
3265 timetaken = jiffies - timetaken;
3266 dev_warn(&dd->pdev->dev,
3267 "Removal detected at %u ms\n",
3268 jiffies_to_msecs(timetaken));
3269 rv = -EFAULT;
3270 goto out2;
3271 }
3272
3273 /* Conditionally reset the HBA. */
3274 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
3275 if (mtip_hba_reset(dd) < 0) {
3276 dev_err(&dd->pdev->dev,
3277 "Card did not reset within timeout\n");
3278 rv = -EIO;
3279 goto out2;
3280 }
3281 } else {
3282 /* Clear any pending interrupts on the HBA */
3283 writel(readl(dd->mmio + HOST_IRQ_STAT),
3284 dd->mmio + HOST_IRQ_STAT);
3285 }
3286
3287 mtip_init_port(dd->port);
3288 mtip_start_port(dd->port);
3289
3290 /* Setup the ISR and enable interrupts. */
3291 rv = devm_request_irq(&dd->pdev->dev,
3292 dd->pdev->irq,
3293 mtip_irq_handler,
3294 IRQF_SHARED,
3295 dev_driver_string(&dd->pdev->dev),
3296 dd);
3297
3298 if (rv) {
3299 dev_err(&dd->pdev->dev,
3300 "Unable to allocate IRQ %d\n", dd->pdev->irq);
3301 goto out2;
3302 }
3303 irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
3304
3305 /* Enable interrupts on the HBA. */
3306 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
3307 dd->mmio + HOST_CTL);
3308
3309 init_waitqueue_head(&dd->port->svc_wait);
3310
3311 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
3312 rv = -EFAULT;
3313 goto out3;
3314 }
3315
3316 return rv;
3317
3318 out3:
3319 /* Disable interrupts on the HBA. */
3320 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3321 dd->mmio + HOST_CTL);
3322
3323 /* Release the IRQ. */
3324 irq_set_affinity_hint(dd->pdev->irq, NULL);
3325 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
3326
3327 out2:
3328 mtip_deinit_port(dd->port);
3329 mtip_dma_free(dd);
3330
3331 out1:
3332 /* Free the memory allocated for the port structure. */
3333 kfree(dd->port);
3334
3335 return rv;
3336 }
3337
3338 static int mtip_standby_drive(struct driver_data *dd)
3339 {
3340 int rv = 0;
3341
3342 if (dd->sr || !dd->port)
3343 return -ENODEV;
3344 /*
3345 * Send standby immediate (E0h) to the drive so that it
3346 * saves its state.
3347 */
3348 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
3349 !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
3350 !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
3351 rv = mtip_standby_immediate(dd->port);
3352 if (rv)
3353 dev_warn(&dd->pdev->dev,
3354 "STANDBY IMMEDIATE failed\n");
3355 }
3356 return rv;
3357 }
3358
3359 /*
3360 * Called to deinitialize an interface.
3361 *
3362 * @dd Pointer to the driver data structure.
3363 *
3364 * return value
3365 * 0
3366 */
3367 static int mtip_hw_exit(struct driver_data *dd)
3368 {
3369 if (!dd->sr) {
3370 /* de-initialize the port. */
3371 mtip_deinit_port(dd->port);
3372
3373 /* Disable interrupts on the HBA. */
3374 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3375 dd->mmio + HOST_CTL);
3376 }
3377
3378 /* Release the IRQ. */
3379 irq_set_affinity_hint(dd->pdev->irq, NULL);
3380 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
3381 msleep(1000);
3382
3383 /* Free dma regions */
3384 mtip_dma_free(dd);
3385
3386 /* Free the memory allocated for the port structure. */
3387 kfree(dd->port);
3388 dd->port = NULL;
3389
3390 return 0;
3391 }
3392
3393 /*
3394 * Issue a Standby Immediate command to the device.
3395 *
3396 * This function is called by the Block Layer just before the
3397 * system powers off during a shutdown.
3398 *
3399 * @dd Pointer to the driver data structure.
3400 *
3401 * return value
3402 * 0
3403 */
3404 static int mtip_hw_shutdown(struct driver_data *dd)
3405 {
3406 /*
3407 * Send standby immediate (E0h) to the drive so that it
3408 * saves its state.
3409 */
3410 mtip_standby_drive(dd);
3411
3412 return 0;
3413 }
3414
3415 /*
3416 * Suspend function
3417 *
3418 * This function is called by the Block Layer just before the
3419 * system hibernates.
3420 *
3421 * @dd Pointer to the driver data structure.
3422 *
3423 * return value
3424 * 0 Suspend was successful
3425 * -EFAULT Suspend was not successful
3426 */
3427 static int mtip_hw_suspend(struct driver_data *dd)
3428 {
3429 /*
3430 * Send standby immediate (E0h) to the drive
3431 * so that it saves its state.
3432 */
3433 if (mtip_standby_drive(dd) != 0) {
3434 dev_err(&dd->pdev->dev,
3435 "Failed standby-immediate command\n");
3436 return -EFAULT;
3437 }
3438
3439 /* Disable interrupts on the HBA.*/
3440 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3441 dd->mmio + HOST_CTL);
3442 mtip_deinit_port(dd->port);
3443
3444 return 0;
3445 }
3446
3447 /*
3448 * Resume function
3449 *
3450 * This function is called by the Block Layer as the
3451 * system resumes.
3452 *
3453 * @dd Pointer to the driver data structure.
3454 *
3455 * return value
3456 * 0 Resume was successful
3457 * -EFAULT Resume was not successful
3458 */
3459 static int mtip_hw_resume(struct driver_data *dd)
3460 {
3461 /* Perform any needed hardware setup steps */
3462 hba_setup(dd);
3463
3464 /* Reset the HBA */
3465 if (mtip_hba_reset(dd) != 0) {
3466 dev_err(&dd->pdev->dev,
3467 "Unable to reset the HBA\n");
3468 return -EFAULT;
3469 }
3470
3471 /*
3472 * Enable the port, DMA engine, and FIS reception specific
3473 * h/w in controller.
3474 */
3475 mtip_init_port(dd->port);
3476 mtip_start_port(dd->port);
3477
3478 /* Enable interrupts on the HBA.*/
3479 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
3480 dd->mmio + HOST_CTL);
3481
3482 return 0;
3483 }
3484
3485 /*
3486 * Helper function for reusing disk name
3487 * upon hot insertion.
3488 */
3489 static int rssd_disk_name_format(char *prefix,
3490 int index,
3491 char *buf,
3492 int buflen)
3493 {
3494 const int base = 'z' - 'a' + 1;
3495 char *begin = buf + strlen(prefix);
3496 char *end = buf + buflen;
3497 char *p;
3498 int unit;
3499
3500 p = end - 1;
3501 *p = '\0';
3502 unit = base;
3503 do {
3504 if (p == begin)
3505 return -EINVAL;
3506 *--p = 'a' + (index % unit);
3507 index = (index / unit) - 1;
3508 } while (index >= 0);
3509
3510 memmove(begin, p, end - p);
3511 memcpy(buf, prefix, strlen(prefix));
3512
3513 return 0;
3514 }
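/*
 * The suffix is a bijective base-26 encoding, matching sd.c:
 * index 0 maps to "rssda", 25 to "rssdz", 26 to "rssdaa".
 */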
3515
3516 /*
3517 * Block layer IOCTL handler.
3518 *
3519 * @dev Pointer to the block_device structure.
3520 * @mode ignored
3521 * @cmd IOCTL command passed from the user application.
3522 * @arg Argument passed from the user application.
3523 *
3524 * return value
3525 * 0 IOCTL completed successfully.
3526 * -ENOTTY IOCTL not supported or invalid driver data
3527 * structure pointer.
3528 */
3529 static int mtip_block_ioctl(struct block_device *dev,
3530 fmode_t mode,
3531 unsigned cmd,
3532 unsigned long arg)
3533 {
3534 struct driver_data *dd = dev->bd_disk->private_data;
3535
3536 if (!capable(CAP_SYS_ADMIN))
3537 return -EACCES;
3538
3539 if (!dd)
3540 return -ENOTTY;
3541
3542 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3543 return -ENOTTY;
3544
3545 switch (cmd) {
3546 case BLKFLSBUF:
3547 return -ENOTTY;
3548 default:
3549 return mtip_hw_ioctl(dd, cmd, arg);
3550 }
3551 }
3552
3553 #ifdef CONFIG_COMPAT
3554 /*
3555 * Block layer compat IOCTL handler.
3556 *
3557 * @dev Pointer to the block_device structure.
3558 * @mode ignored
3559 * @cmd IOCTL command passed from the user application.
3560 * @arg Argument passed from the user application.
3561 *
3562 * return value
3563 * 0 IOCTL completed successfully.
3564 * -ENOTTY IOCTL not supported or invalid driver data
3565 * structure pointer.
3566 */
3567 static int mtip_block_compat_ioctl(struct block_device *dev,
3568 fmode_t mode,
3569 unsigned cmd,
3570 unsigned long arg)
3571 {
3572 struct driver_data *dd = dev->bd_disk->private_data;
3573
3574 if (!capable(CAP_SYS_ADMIN))
3575 return -EACCES;
3576
3577 if (!dd)
3578 return -ENOTTY;
3579
3580 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3581 return -ENOTTY;
3582
3583 switch (cmd) {
3584 case BLKFLSBUF:
3585 return -ENOTTY;
3586 case HDIO_DRIVE_TASKFILE: {
3587 struct mtip_compat_ide_task_request_s __user *compat_req_task;
3588 ide_task_request_t req_task;
3589 int compat_tasksize, outtotal, ret;
3590
3591 compat_tasksize =
3592 sizeof(struct mtip_compat_ide_task_request_s);
3593
3594 compat_req_task =
3595 (struct mtip_compat_ide_task_request_s __user *) arg;
3596
3597 if (copy_from_user(&req_task, (void __user *) arg,
3598 compat_tasksize - (2 * sizeof(compat_long_t))))
3599 return -EFAULT;
3600
3601 if (get_user(req_task.out_size, &compat_req_task->out_size))
3602 return -EFAULT;
3603
3604 if (get_user(req_task.in_size, &compat_req_task->in_size))
3605 return -EFAULT;
3606
3607 outtotal = sizeof(struct mtip_compat_ide_task_request_s);
3608
3609 ret = exec_drive_taskfile(dd, (void __user *) arg,
3610 &req_task, outtotal);
3611
3612 if (copy_to_user((void __user *) arg, &req_task,
3613 compat_tasksize -
3614 (2 * sizeof(compat_long_t))))
3615 return -EFAULT;
3616
3617 if (put_user(req_task.out_size, &compat_req_task->out_size))
3618 return -EFAULT;
3619
3620 if (put_user(req_task.in_size, &compat_req_task->in_size))
3621 return -EFAULT;
3622
3623 return ret;
3624 }
3625 default:
3626 return mtip_hw_ioctl(dd, cmd, arg);
3627 }
3628 }
3629 #endif
3630
3631 /*
3632 * Obtain the geometry of the device.
3633 *
3634 * You may think that this function is obsolete, but some applications,
3635 * fdisk for example, still use CHS values. This function describes the
3636 * device as having 224 heads and 56 sectors per cylinder (224 * 56 =
3637 * 12544 sectors, a multiple of 8, so each cylinder is 4KB aligned).
3638 * Since a partition is described in terms of a start and end cylinder,
3639 * each partition is also 4KB aligned. Non-aligned partitions adversely
3640 * affect performance.
3641 *
3642 * @dev Pointer to the block_device structure.
3643 * @geo Pointer to a hd_geometry structure.
3644 *
3645 * return value
3646 * 0 Operation completed successfully.
3647 * -ENOTTY An error occurred while reading the drive capacity.
3648 */
3649 static int mtip_block_getgeo(struct block_device *dev,
3650 struct hd_geometry *geo)
3651 {
3652 struct driver_data *dd = dev->bd_disk->private_data;
3653 sector_t capacity;
3654
3655 if (!dd)
3656 return -ENOTTY;
3657
3658 if (!(mtip_hw_get_capacity(dd, &capacity))) {
3659 dev_warn(&dd->pdev->dev,
3660 "Could not get drive capacity.\n");
3661 return -ENOTTY;
3662 }
3663
3664 geo->heads = 224;
3665 geo->sectors = 56;
3666 sector_div(capacity, (geo->heads * geo->sectors));
3667 geo->cylinders = capacity;
3668 return 0;
3669 }
3670
3671 static int mtip_block_open(struct block_device *dev, fmode_t mode)
3672 {
3673 struct driver_data *dd;
3674
3675 if (dev && dev->bd_disk) {
3676 dd = (struct driver_data *) dev->bd_disk->private_data;
3677
3678 if (dd) {
3679 if (test_bit(MTIP_DDF_REMOVAL_BIT,
3680 &dd->dd_flag)) {
3681 return -ENODEV;
3682 }
3683 return 0;
3684 }
3685 }
3686 return -ENODEV;
3687 }
3688
3689 void mtip_block_release(struct gendisk *disk, fmode_t mode)
3690 {
3691 }
3692
3693 /*
3694 * Block device operation function.
3695 *
3696 * This structure contains pointers to the functions required by the block
3697 * layer.
3698 */
3699 static const struct block_device_operations mtip_block_ops = {
3700 .open = mtip_block_open,
3701 .release = mtip_block_release,
3702 .ioctl = mtip_block_ioctl,
3703 #ifdef CONFIG_COMPAT
3704 .compat_ioctl = mtip_block_compat_ioctl,
3705 #endif
3706 .getgeo = mtip_block_getgeo,
3707 .owner = THIS_MODULE
3708 };
3709
3710 static inline bool is_se_active(struct driver_data *dd)
3711 {
3712 if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
3713 if (dd->port->ic_pause_timer) {
3714 unsigned long to = dd->port->ic_pause_timer +
3715 msecs_to_jiffies(1000);
3716 if (time_after(jiffies, to)) {
3717 clear_bit(MTIP_PF_SE_ACTIVE_BIT,
3718 &dd->port->flags);
3719 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
3720 dd->port->ic_pause_timer = 0;
3721 wake_up_interruptible(&dd->port->svc_wait);
3722 return false;
3723 }
3724 }
3725 return true;
3726 }
3727 return false;
3728 }
3729
3730 /*
3731 * Block layer make request function.
3732 *
3733 * This function is called by the block layer to process a request
3734 * for the P320 device.
3735 *
3736 * @hctx Pointer to the hardware queue context. Used only to obtain
3737 * the request queue and the driver data structure.
3738 * @rq Pointer to the request.
3739 *
3740 */
3741 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
3742 {
3743 struct driver_data *dd = hctx->queue->queuedata;
3744 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3745 unsigned int nents;
3746
3747 if (is_se_active(dd))
3748 return -ENODATA;
3749
3750 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
3751 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
3752 &dd->dd_flag))) {
3753 return -ENXIO;
3754 }
3755 if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
3756 return -ENODATA;
3757 }
3758 if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
3759 &dd->dd_flag) &&
3760 rq_data_dir(rq))) {
3761 return -ENODATA;
3762 }
3763 if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
3764 test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
3765 return -ENODATA;
3766 }
3767
3768 if (req_op(rq) == REQ_OP_DISCARD) {
3769 int err;
3770
3771 err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
3772 blk_mq_end_request(rq, err);
3773 return 0;
3774 }
3775
3776 /* Create the scatter list for this request. */
3777 nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
3778
3779 /* Issue the read/write. */
3780 mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
3781 return 0;
3782 }
3783
3784 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
3785 struct request *rq)
3786 {
3787 struct driver_data *dd = hctx->queue->queuedata;
3788 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3789
3790 if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
3791 return false;
3792
3793 /*
3794 * If unaligned depth must be limited on this controller, mark it
3795 * as unaligned if the IO isn't on a 4k boundary (start or length).
3796 */
3797 if (blk_rq_sectors(rq) <= 64) {
3798 if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
3799 cmd->unaligned = 1;
3800 }
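/*
 * E.g. a 9-sector request is unaligned (length not a multiple of
 * the 8 sectors in 4k), as is a 16-sector request at LBA 4 (start
 * not 4k aligned); requests larger than 64 sectors skip the check.
 */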
3801
3802 if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
3803 return true;
3804
3805 return false;
3806 }
3807
3808 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
3809 const struct blk_mq_queue_data *bd)
3810 {
3811 struct request *rq = bd->rq;
3812 int ret;
3813
3814 if (unlikely(mtip_check_unal_depth(hctx, rq)))
3815 return BLK_MQ_RQ_QUEUE_BUSY;
3816
3817 blk_mq_start_request(rq);
3818
3819 ret = mtip_submit_request(hctx, rq);
3820 if (likely(!ret))
3821 return BLK_MQ_RQ_QUEUE_OK;
3822
3823 rq->errors = ret;
3824 return BLK_MQ_RQ_QUEUE_ERROR;
3825 }
3826
3827 static void mtip_free_cmd(void *data, struct request *rq,
3828 unsigned int hctx_idx, unsigned int request_idx)
3829 {
3830 struct driver_data *dd = data;
3831 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3832
3833 if (!cmd->command)
3834 return;
3835
3836 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
3837 cmd->command, cmd->command_dma);
3838 }
3839
3840 static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
3841 unsigned int request_idx, unsigned int numa_node)
3842 {
3843 struct driver_data *dd = data;
3844 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3845 u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
3846
3847 /*
3848 * For flush requests, request_idx starts at the end of the
3849 * tag space. Since we don't support FLUSH/FUA, simply return
3850 * 0 as there's nothing to be done.
3851 */
3852 if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
3853 return 0;
3854
3855 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
3856 &cmd->command_dma, GFP_KERNEL);
3857 if (!cmd->command)
3858 return -ENOMEM;
3859
3860 memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
3861
3862 /* Point the command headers at the command tables. */
3863 cmd->command_header = dd->port->command_list +
3864 (sizeof(struct mtip_cmd_hdr) * request_idx);
3865 cmd->command_header_dma = dd->port->command_list_dma +
3866 (sizeof(struct mtip_cmd_hdr) * request_idx);
3867
3868 if (host_cap_64)
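/*
 * Split the 64-bit command table bus address across the ctba and
 * ctbau header fields; the double 16-bit shift avoids an undefined
 * 32-bit shift when dma_addr_t is only 32 bits wide.
 */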
3869 cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
3870
3871 cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
3872
3873 sg_init_table(cmd->sg, MTIP_MAX_SG);
3874 return 0;
3875 }
3876
3877 static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
3878 bool reserved)
3879 {
3880 struct driver_data *dd = req->q->queuedata;
3881
3882 if (reserved)
3883 goto exit_handler;
3884
3885 if (test_bit(req->tag, dd->port->cmds_to_issue))
3886 goto exit_handler;
3887
3888 if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
3889 goto exit_handler;
3890
3891 wake_up_interruptible(&dd->port->svc_wait);
3892 exit_handler:
3893 return BLK_EH_RESET_TIMER;
3894 }
3895
3896 static struct blk_mq_ops mtip_mq_ops = {
3897 .queue_rq = mtip_queue_rq,
3898 .map_queue = blk_mq_map_queue,
3899 .init_request = mtip_init_cmd,
3900 .exit_request = mtip_free_cmd,
3901 .complete = mtip_softirq_done_fn,
3902 .timeout = mtip_cmd_timeout,
3903 };
3904
3905 /*
3906 * Block layer initialization function.
3907 *
3908 * This function is called once by the PCI layer for each P320
3909 * device that is connected to the system.
3910 *
3911 * @dd Pointer to the driver data structure.
3912 *
3913 * return value
3914 * 0 on success else an error code.
3915 */
3916 static int mtip_block_initialize(struct driver_data *dd)
3917 {
3918 int rv = 0, wait_for_rebuild = 0;
3919 sector_t capacity;
3920 unsigned int index = 0;
3921 struct kobject *kobj;
3922
3923 if (dd->disk)
3924 goto skip_create_disk; /* hw init done, before rebuild */
3925
3926 if (mtip_hw_init(dd)) {
3927 rv = -EINVAL;
3928 goto protocol_init_error;
3929 }
3930
3931 dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
3932 if (dd->disk == NULL) {
3933 dev_err(&dd->pdev->dev,
3934 "Unable to allocate gendisk structure\n");
3935 rv = -EINVAL;
3936 goto alloc_disk_error;
3937 }
3938
3939 /* Generate the disk name, implemented same as in sd.c */
3940 do {
3941 if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
3942 goto ida_get_error;
3943
3944 spin_lock(&rssd_index_lock);
3945 rv = ida_get_new(&rssd_index_ida, &index);
3946 spin_unlock(&rssd_index_lock);
3947 } while (rv == -EAGAIN);
3948
3949 if (rv)
3950 goto ida_get_error;
3951
3952 rv = rssd_disk_name_format("rssd",
3953 index,
3954 dd->disk->disk_name,
3955 DISK_NAME_LEN);
3956 if (rv)
3957 goto disk_index_error;
3958
3959 dd->disk->major = dd->major;
3960 dd->disk->first_minor = index * MTIP_MAX_MINORS;
3961 dd->disk->minors = MTIP_MAX_MINORS;
3962 dd->disk->fops = &mtip_block_ops;
3963 dd->disk->private_data = dd;
3964 dd->index = index;
3965
3966 mtip_hw_debugfs_init(dd);
3967
3968 memset(&dd->tags, 0, sizeof(dd->tags));
3969 dd->tags.ops = &mtip_mq_ops;
3970 dd->tags.nr_hw_queues = 1;
3971 dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
3972 dd->tags.reserved_tags = 1;
3973 dd->tags.cmd_size = sizeof(struct mtip_cmd);
3974 dd->tags.numa_node = dd->numa_node;
3975 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
3976 dd->tags.driver_data = dd;
3977 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
3978
3979 rv = blk_mq_alloc_tag_set(&dd->tags);
3980 if (rv) {
3981 dev_err(&dd->pdev->dev,
3982 "Unable to allocate request queue\n");
3983 goto block_queue_alloc_tag_error;
3984 }
3985
3986 /* Allocate the request queue. */
3987 dd->queue = blk_mq_init_queue(&dd->tags);
3988 if (IS_ERR(dd->queue)) {
3989 dev_err(&dd->pdev->dev,
3990 "Unable to allocate request queue\n");
3991 rv = -ENOMEM;
3992 goto block_queue_alloc_init_error;
3993 }
3994
3995 dd->disk->queue = dd->queue;
3996 dd->queue->queuedata = dd;
3997
3998 skip_create_disk:
3999 /* Initialize the protocol layer. */
4000 wait_for_rebuild = mtip_hw_get_identify(dd);
4001 if (wait_for_rebuild < 0) {
4002 dev_err(&dd->pdev->dev,
4003 "Protocol layer initialization failed\n");
4004 rv = -EINVAL;
4005 goto init_hw_cmds_error;
4006 }
4007
4008 /*
4009 * if rebuild pending, start the service thread, and delay the block
4010 * queue creation and device_add_disk()
4011 */
4012 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
4013 goto start_service_thread;

	/* Set device limits. */
	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
	clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
	blk_queue_physical_block_size(dd->queue, 4096);
	blk_queue_max_hw_sectors(dd->queue, 0xffff);
	blk_queue_max_segment_size(dd->queue, 0x400000);
	blk_queue_io_min(dd->queue, 4096);
	blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);

	/* Signal trim support */
	if (dd->trim_supp) {
		set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
		dd->queue->limits.discard_granularity = 4096;
		blk_queue_max_discard_sectors(dd->queue,
			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
		dd->queue->limits.discard_zeroes_data = 0;
	}

	/* Set the capacity of the device in 512 byte sectors. */
	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not read drive capacity\n");
		rv = -EIO;
		goto read_capacity_error;
	}
	set_capacity(dd->disk, capacity);

	/* Enable the block device and add it to /dev */
	device_add_disk(&dd->pdev->dev, dd->disk);

	dd->bdev = bdget_disk(dd->disk, 0);
	/*
	 * Now that the disk is active, initialize any sysfs attributes
	 * managed by the protocol layer.
	 */
	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
	if (kobj) {
		mtip_hw_sysfs_init(dd, kobj);
		kobject_put(kobj);
	}

	if (dd->mtip_svc_handler) {
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
		return rv; /* service thread created for handling rebuild */
	}

start_service_thread:
	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
						dd, dd->numa_node,
						"mtip_svc_thd_%02d", index);

	if (IS_ERR(dd->mtip_svc_handler)) {
		dev_err(&dd->pdev->dev, "service thread failed to start\n");
		dd->mtip_svc_handler = NULL;
		rv = -EFAULT;
		goto kthread_run_error;
	}
	wake_up_process(dd->mtip_svc_handler);
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		rv = wait_for_rebuild;

	return rv;

kthread_run_error:
	bdput(dd->bdev);
	dd->bdev = NULL;

	/* Delete our gendisk. This also removes the device from /dev */
	del_gendisk(dd->disk);

read_capacity_error:
init_hw_cmds_error:
	blk_cleanup_queue(dd->queue);
block_queue_alloc_init_error:
	blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
	mtip_hw_debugfs_exit(dd);
disk_index_error:
	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, index);
	spin_unlock(&rssd_index_lock);

ida_get_error:
	put_disk(dd->disk);

alloc_disk_error:
	mtip_hw_exit(dd); /* De-initialize the protocol layer. */

protocol_init_error:
	return rv;
}
4107
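/*
 * Iterator callback passed to blk_mq_tagset_busy_iter() during removal.
 *
 * Fails each in-flight request with -ENODEV; for the reserved (internal
 * command) tag it invokes the command's completion callback directly.
 */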
static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
	struct driver_data *dd = (struct driver_data *)data;
	struct mtip_cmd *cmd;

	if (likely(!reserv))
		blk_mq_complete_request(rq, -ENODEV);
	else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		if (cmd->comp_func)
			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
					cmd, -ENODEV);
	}
}

/*
 * Block layer deinitialization function.
 *
 * Called by the PCI layer as each P320 device is removed.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_remove(struct driver_data *dd)
{
	struct kobject *kobj;

	mtip_hw_debugfs_exit(dd);

	if (dd->mtip_svc_handler) {
		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
		kthread_stop(dd->mtip_svc_handler);
	}

	/* Clean up the sysfs attributes, if created */
	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
		if (kobj) {
			mtip_hw_sysfs_exit(dd, kobj);
			kobject_put(kobj);
		}
	}

	if (!dd->sr) {
		/*
		 * Explicitly wait here for IOs to quiesce,
		 * as mtip_standby_drive usually won't wait for IOs.
		 */
		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
				GFP_KERNEL))
			mtip_standby_drive(dd);
	} else
		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
			dd->disk->disk_name);

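	/*
	 * Quiesce the block queue and fail any remaining in-flight requests
	 * with -ENODEV via mtip_no_dev_cleanup() before removing the disk.
	 */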
	blk_mq_freeze_queue_start(dd->queue);
	blk_mq_stop_hw_queues(dd->queue);
	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);

	/*
	 * Delete our gendisk structure. This also removes the device
	 * from /dev
	 */
	if (dd->bdev) {
		bdput(dd->bdev);
		dd->bdev = NULL;
	}
	if (dd->disk) {
		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
			del_gendisk(dd->disk);
		if (dd->disk->queue) {
			blk_cleanup_queue(dd->queue);
			blk_mq_free_tag_set(&dd->tags);
			dd->queue = NULL;
		}
		put_disk(dd->disk);
	}
	dd->disk = NULL;

	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, dd->index);
	spin_unlock(&rssd_index_lock);

	/* De-initialize the protocol layer. */
	mtip_hw_exit(dd);

	return 0;
}

/*
 * Function called by the PCI layer just before the
 * machine shuts down.
 *
 * If a protocol layer shutdown function is present it will be called
 * by this function.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_shutdown(struct driver_data *dd)
{
	mtip_hw_shutdown(dd);

	/* Delete our gendisk structure, and clean up the blk queue. */
	if (dd->disk) {
		dev_info(&dd->pdev->dev,
			"Shutting down %s ...\n", dd->disk->disk_name);

		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
			del_gendisk(dd->disk);
		if (dd->disk->queue) {
			blk_cleanup_queue(dd->queue);
			blk_mq_free_tag_set(&dd->tags);
		}
		put_disk(dd->disk);
		dd->disk = NULL;
		dd->queue = NULL;
	}

	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, dd->index);
	spin_unlock(&rssd_index_lock);
	return 0;
}

static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	mtip_hw_suspend(dd);
	return 0;
}

static int mtip_block_resume(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
		dd->disk->disk_name);
	mtip_hw_resume(dd);
	return 0;
}

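/* Release a CPU reference taken by get_least_used_cpu_on_node() below. */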
static void drop_cpu(int cpu)
{
	cpu_use[cpu]--;
}

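/*
 * Return the CPU on @node with the fewest references recorded in
 * cpu_use[], and take a reference on it.
 */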
static int get_least_used_cpu_on_node(int node)
{
	int cpu, least_used_cpu, least_cnt;
	const struct cpumask *node_mask;

	node_mask = cpumask_of_node(node);
	least_used_cpu = cpumask_first(node_mask);
	least_cnt = cpu_use[least_used_cpu];
	cpu = least_used_cpu;

	for_each_cpu(cpu, node_mask) {
		if (cpu_use[cpu] < least_cnt) {
			least_used_cpu = cpu;
			least_cnt = cpu_use[cpu];
		}
	}
	cpu_use[least_used_cpu]++;
	return least_used_cpu;
}

/* Helper for selecting a node in round-robin mode */
static inline int mtip_get_next_rr_node(void)
{
	static int next_node = -1;

	if (next_node == -1) {
		next_node = first_online_node;
		return next_node;
	}

	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)
		next_node = first_online_node;
	return next_node;
}

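/*
 * Instantiate the eight per-slot-group service handlers
 * (mtip_workq_sdbf0 .. mtip_workq_sdbf7) referenced by the INIT_WORK()
 * calls in mtip_pci_probe().
 */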
static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static DEFINE_HANDLER(4);
static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);

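/*
 * Clear the Enable Relaxed Ordering and No Snoop bits in the PCIe
 * Device Control register of @pdev when either one is set.
 */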
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
	int pos;
	unsigned short pcie_dev_ctrl;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos) {
		pci_read_config_word(pdev,
			pos + PCI_EXP_DEVCTL,
			&pcie_dev_ctrl);
		if (pcie_dev_ctrl & (PCI_EXP_DEVCTL_NOSNOOP_EN |
				     PCI_EXP_DEVCTL_RELAX_EN)) {
			dev_info(&dd->pdev->dev,
				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
				pdev->vendor, pdev->device);
			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
						PCI_EXP_DEVCTL_RELAX_EN);
			pci_write_config_word(pdev,
				pos + PCI_EXP_DEVCTL,
				pcie_dev_ctrl);
		}
	}
}

static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
{
	/*
	 * This workaround is specific to AMD/ATI chipset with a PCI upstream
	 * device with device id 0x5aXX
	 */
	if (pdev->bus && pdev->bus->self) {
		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
			mtip_disable_link_opts(dd, pdev->bus->self);
		} else {
			/* Check further up the topology */
			struct pci_dev *parent_dev = pdev->bus->self;
			if (parent_dev->bus &&
				parent_dev->bus->parent &&
				parent_dev->bus->parent->self &&
				parent_dev->bus->parent->self->vendor ==
					PCI_VENDOR_ID_ATI &&
				(parent_dev->bus->parent->self->device &
					0xff00) == 0x5a00) {
				mtip_disable_link_opts(dd,
					parent_dev->bus->parent->self);
			}
		}
	}
}

/*
 * Called for each supported PCI device detected.
 *
 * This function allocates the private data structure, enables the
 * PCI device and then calls the block layer initialization function.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	int rv = 0;
	struct driver_data *dd = NULL;
	char cpu_list[256];
	const struct cpumask *node_mask;
	int cpu, i = 0, j = 0;
	int my_node = NUMA_NO_NODE;
	unsigned long flags;

	/* Allocate memory for this device's private data. */
	my_node = pcibus_to_node(pdev->bus);
	if (my_node != NUMA_NO_NODE) {
		if (!node_online(my_node))
			my_node = mtip_get_next_rr_node();
	} else {
		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
		my_node = mtip_get_next_rr_node();
	}
	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
		cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());

	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
	if (dd == NULL) {
		dev_err(&pdev->dev,
			"Unable to allocate memory for driver data\n");
		return -ENOMEM;
	}

	/* Attach the private data to this PCI device. */
	pci_set_drvdata(pdev, dd);

	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto iomap_err;
	}

	/* Map BAR5 to memory. */
	rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to map regions\n");
		goto iomap_err;
	}

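	/*
	 * Request a 64-bit DMA mask; if the 64-bit coherent mask cannot be
	 * set, fall back to a 32-bit coherent mask before giving up.
	 */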
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

		if (rv) {
			rv = pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32));
			if (rv) {
				dev_warn(&pdev->dev,
					"64-bit DMA enable failed\n");
				goto setmask_err;
			}
		}
	}

	/* Copy the info we may need later into the private data structure. */
	dd->major = mtip_major;
	dd->instance = instance;
	dd->pdev = pdev;
	dd->numa_node = my_node;

	INIT_LIST_HEAD(&dd->online_list);
	INIT_LIST_HEAD(&dd->remove_list);

	memset(dd->workq_name, 0, 32);
	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);

	dd->isr_workq = create_workqueue(dd->workq_name);
	if (!dd->isr_workq) {
		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
		rv = -ENOMEM;
		goto block_initialize_err;
	}

	memset(cpu_list, 0, sizeof(cpu_list));

	node_mask = cpumask_of_node(dd->numa_node);
	if (!cpumask_empty(node_mask)) {
		for_each_cpu(cpu, node_mask) {
			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
			j = strlen(cpu_list);
		}

		dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
			dd->numa_node,
			topology_physical_package_id(cpumask_first(node_mask)),
			nr_cpus_node(dd->numa_node),
			cpu_list);
	} else
		dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");

	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
	dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
		cpu_to_node(dd->isr_binding), dd->isr_binding);

	/* first worker context always runs in ISR */
	dd->work[0].cpu_binding = dd->isr_binding;
	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
	dd->work[7].cpu_binding = dd->work[1].cpu_binding;

	/* Log the bindings */
	for_each_present_cpu(cpu) {
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
			if (dd->work[i].cpu_binding == cpu) {
				snprintf(&cpu_list[j], 256 - j, "%d ", i);
				j = strlen(cpu_list);
			}
		}
		if (j)
			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
	}

	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);

	pci_set_master(pdev);
	rv = pci_enable_msi(pdev);
	if (rv) {
		dev_warn(&pdev->dev,
			"Unable to enable MSI interrupt.\n");
		goto msi_initialize_err;
	}

	mtip_fix_ero_nosnoop(dd, pdev);

	/* Initialize the block layer. */
	rv = mtip_block_initialize(dd);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Unable to initialize block layer\n");
		goto block_initialize_err;
	}

	/*
	 * Increment the instance count so that each device has a unique
	 * instance number.
	 */
	instance++;
	if (rv != MTIP_FTL_REBUILD_MAGIC)
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
	else
		rv = 0; /* device in rebuild state, return 0 from probe */

	/* Add to online list even if in ftl rebuild */
	spin_lock_irqsave(&dev_lock, flags);
	list_add(&dd->online_list, &online_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	goto done;

block_initialize_err:
	pci_disable_msi(pdev);

msi_initialize_err:
	if (dd->isr_workq) {
		flush_workqueue(dd->isr_workq);
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}
setmask_err:
	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);

iomap_err:
	kfree(dd);
	pci_set_drvdata(pdev, NULL);
	return rv;
done:
	return rv;
}

/*
 * Called for each probed device when the device is removed or the
 * driver is unloaded.
 *
 * return value
 *	None
 */
static void mtip_pci_remove(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);
	unsigned long flags, to;

	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->online_list);
	list_add(&dd->remove_list, &removing_list);
	spin_unlock_irqrestore(&dev_lock, flags);

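	/* Detect surprise removal and let any in-flight IRQ handler finish */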
	mtip_check_surprise_removal(pdev);
	synchronize_irq(dd->pdev->irq);

	/* Spin until workers are done */
	to = jiffies + msecs_to_jiffies(4000);
	do {
		msleep(20);
	} while (atomic_read(&dd->irq_workers_active) != 0 &&
		time_before(jiffies, to));

	if (!dd->sr)
		fsync_bdev(dd->bdev);

	if (atomic_read(&dd->irq_workers_active) != 0) {
		dev_warn(&dd->pdev->dev,
			"Completion workers still active!\n");
	}

	blk_set_queue_dying(dd->queue);
	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

	/* Clean up the block layer. */
	mtip_block_remove(dd);

	if (dd->isr_workq) {
		flush_workqueue(dd->isr_workq);
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}

	pci_disable_msi(pdev);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->remove_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	kfree(dd);

	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Called for each probed device when the device is suspended.
 *
 * return value
 *	0 Success
 *	<0 Error
 */
static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	int rv = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (!dd) {
		dev_err(&pdev->dev,
			"Driver private data structure is NULL\n");
		return -EFAULT;
	}

	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	/* Disable ports & interrupts then send standby immediate */
	rv = mtip_block_suspend(dd);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Failed to suspend controller\n");
		return rv;
	}

	/*
	 * Save the PCI config space to the pdev structure and
	 * disable the device.
	 */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* Move to low power state */
	pci_set_power_state(pdev, PCI_D3hot);

	return rv;
}

/*
 * Called for each probed device when the device is resumed.
 *
 * return value
 *	0 Success
 *	<0 Error
 */
static int mtip_pci_resume(struct pci_dev *pdev)
{
	int rv = 0;
	struct driver_data *dd;

	dd = pci_get_drvdata(pdev);
	if (!dd) {
		dev_err(&pdev->dev,
			"Driver private data structure is NULL\n");
		return -EFAULT;
	}

	/* Move the device to the active state */
	pci_set_power_state(pdev, PCI_D0);

	/* Restore PCI configuration space */
	pci_restore_state(pdev);

	/* Enable the PCI device */
	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Failed to enable card during resume\n");
		goto err;
	}
	pci_set_master(pdev);

	/*
	 * Calls the hbaReset, initPort, and startPort functions,
	 * then enables interrupts.
	 */
	rv = mtip_block_resume(dd);
	if (rv < 0)
		dev_err(&pdev->dev, "Unable to resume\n");

err:
	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	return rv;
}

/*
 * Shutdown routine
 *
 * return value
 *	None
 */
static void mtip_pci_shutdown(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd)
		mtip_block_shutdown(dd);
}

/* Table of device ids supported by this driver. */
static const struct pci_device_id mtip_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
	{ 0 }
};

/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
	.name		= MTIP_DRV_NAME,
	.id_table	= mtip_pci_tbl,
	.probe		= mtip_pci_probe,
	.remove		= mtip_pci_remove,
	.suspend	= mtip_pci_suspend,
	.resume		= mtip_pci_resume,
	.shutdown	= mtip_pci_shutdown,
};

MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);

/*
 * Module initialization function.
 *
 * Called once when the module is loaded. This function allocates a major
 * block device number to the Cyclone devices and registers the PCI layer
 * of the driver.
 *
 * Return value
 *	0 on success else error code.
 */
static int __init mtip_init(void)
{
	int error;

	pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");

	spin_lock_init(&dev_lock);

	INIT_LIST_HEAD(&online_list);
	INIT_LIST_HEAD(&removing_list);

	/* Allocate a major block device number to use with this driver. */
	error = register_blkdev(0, MTIP_DRV_NAME);
	if (error <= 0) {
		pr_err("Unable to register block device (%d)\n",
			error);
		return -EBUSY;
	}
	mtip_major = error;

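	/* debugfs nodes are informational only; failure here is non-fatal */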
	dfs_parent = debugfs_create_dir("rssd", NULL);
	if (IS_ERR_OR_NULL(dfs_parent)) {
		pr_warn("Error creating debugfs parent\n");
		dfs_parent = NULL;
	}
	if (dfs_parent) {
		dfs_device_status = debugfs_create_file("device_status",
					S_IRUGO, dfs_parent, NULL,
					&mtip_device_status_fops);
		if (IS_ERR_OR_NULL(dfs_device_status)) {
			pr_err("Error creating device_status node\n");
			dfs_device_status = NULL;
		}
	}

	/* Register our PCI operations. */
	error = pci_register_driver(&mtip_pci_driver);
	if (error) {
		debugfs_remove(dfs_parent);
		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
	}

	return error;
}

/*
 * Module de-initialization function.
 *
 * Called once when the module is unloaded. This function deallocates
 * the major block device number allocated by mtip_init() and
 * unregisters the PCI layer of the driver.
 *
 * Return value
 *	none
 */
static void __exit mtip_exit(void)
{
	/* Release the allocated major block device number. */
	unregister_blkdev(mtip_major, MTIP_DRV_NAME);

	/* Unregister the PCI driver. */
	pci_unregister_driver(&mtip_pci_driver);

	debugfs_remove_recursive(dfs_parent);
}

MODULE_AUTHOR("Micron Technology, Inc");
MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MTIP_DRV_VERSION);

module_init(mtip_init);
module_exit(mtip_exit);