[SCSI] mvsas: misc improvements
[deliverable/linux.git] / drivers / scsi / mvsas / mv_sas.c
CommitLineData
b5762948 1/*
20b09c29
AY
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
0b15fb1f 6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
20b09c29
AY
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; version 2 of the
13 * License.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * USA
24*/
b5762948 25
dd4969a8 26#include "mv_sas.h"
b5762948 27
dd4969a8
JG
28static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
29{
30 if (task->lldd_task) {
31 struct mvs_slot_info *slot;
f9da3be5 32 slot = task->lldd_task;
20b09c29 33 *tag = slot->slot_tag;
dd4969a8
JG
34 return 1;
35 }
36 return 0;
37}
8f261aaf 38
20b09c29 39void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
dd4969a8 40{
b89e8f53 41 void *bitmap = mvi->tags;
dd4969a8
JG
42 clear_bit(tag, bitmap);
43}
8f261aaf 44
/* Release a previously allocated tag; thin alias for mvs_tag_clear(). */
void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}
8f261aaf 49
20b09c29 50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
dd4969a8 51{
b89e8f53 52 void *bitmap = mvi->tags;
dd4969a8
JG
53 set_bit(tag, bitmap);
54}
8f261aaf 55
20b09c29 56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
dd4969a8
JG
57{
58 unsigned int index, tag;
b89e8f53 59 void *bitmap = mvi->tags;
b5762948 60
20b09c29 61 index = find_first_zero_bit(bitmap, mvi->tags_num);
dd4969a8 62 tag = index;
20b09c29 63 if (tag >= mvi->tags_num)
dd4969a8
JG
64 return -SAS_QUEUE_FULL;
65 mvs_tag_set(mvi, tag);
66 *tag_out = tag;
67 return 0;
68}
b5762948 69
dd4969a8
JG
70void mvs_tag_init(struct mvs_info *mvi)
71{
72 int i;
20b09c29 73 for (i = 0; i < mvi->tags_num; ++i)
dd4969a8
JG
74 mvs_tag_clear(mvi, i);
75}
b5762948 76
20b09c29
AY
77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
78{
79 unsigned long i = 0, j = 0, hi = 0;
80 struct sas_ha_struct *sha = dev->port->ha;
81 struct mvs_info *mvi = NULL;
82 struct asd_sas_phy *phy;
83
84 while (sha->sas_port[i]) {
85 if (sha->sas_port[i] == dev->port) {
86 phy = container_of(sha->sas_port[i]->phy_list.next,
87 struct asd_sas_phy, port_phy_el);
88 j = 0;
89 while (sha->sas_phy[j]) {
90 if (sha->sas_phy[j] == phy)
91 break;
92 j++;
93 }
94 break;
95 }
96 i++;
97 }
98 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
99 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
8f261aaf 100
20b09c29 101 return mvi;
8f261aaf 102
20b09c29 103}
8f261aaf 104
20b09c29
AY
105/* FIXME */
/*
 * Collect the local phy numbers a device is attached through.
 * Fills phyno[] with per-core phy indices (global index reduced modulo
 * chip->n_phy once — NOTE(review): assumes at most two cores, verify)
 * and returns how many entries were written.
 */
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	/* find the HA port this device hangs off */
	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			/* one entry per phy in the (possibly wide) port */
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				/* global index of this phy */
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				/* reduce global index to a per-core one */
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}
135
534ff101
XY
136struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
137 u8 reg_set)
138{
139 u32 dev_no;
140 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
141 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
142 continue;
143
144 if (mvi->devices[dev_no].taskfileset == reg_set)
145 return &mvi->devices[dev_no];
146 }
147 return NULL;
148}
149
20b09c29
AY
/*
 * Return a device's SATA/STP register set to the chip's free pool.
 * Safe to call with a NULL device or one that was never mapped.
 */
static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been free.\n");
		return;
	}
	/* nothing to release if no register set was ever assigned */
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}
161
162static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
163 struct mvs_device *dev)
164{
165 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
166 return 0;
167 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
168}
169
/*
 * Reset every phy selected in @phy_mask; @hard selects hard vs soft
 * reset (passed straight through to the chip dispatcher).
 */
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	/* for_each_phy iterates phy numbers while shifting the mask;
	 * skip bits that are not set */
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}
179
180/* FIXME: locking? */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	/*
	 * libsas phy-control entry point: dispatch link-rate setting,
	 * hard/link reset and disable to the owning core's chip ops.
	 * Returns 0 on success, -EOPNOTSUPP for unsupported functions.
	 */
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	/* locate the global index of this phy, then the core owning it */
	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		/* skip if a hard reset is already in progress */
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	/* let the phy settle before returning — applied unconditionally */
	msleep(200);
	return rc;
}
224
20b09c29
AY
225void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
226 u32 off_lo, u32 off_hi, u64 sas_addr)
227{
228 u32 lo = (u32)sas_addr;
229 u32 hi = (u32)(sas_addr>>32);
230
231 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
232 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
233 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
234 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
235}
236
/*
 * Notify libsas that phy @i finished OOB and its identify/signature
 * frame has been DMAed, updating the sas_phy link-rate fields and
 * (for SAS phys) the identify frame before raising PORTE_BYTES_DMAED.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	/* SAS phys that are not targets have nothing to report */
	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	/* mirror negotiated/min/max rates into the transport-class phy */
	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		/* patch the received identify frame with local identity */
		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}
	mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				PORTE_BYTES_DMAED);
}
280
20b09c29
AY
/*
 * SCSI midlayer slave_alloc hook: reject non-zero LUNs on SATA
 * devices (single-LUN) and mark them tagged-capable, then defer to
 * the generic libsas slave_alloc.
 */
int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if REPORT_LUNS request is failed
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}
295
/*
 * SCSI midlayer slave_configure hook: run the generic libsas
 * configuration, then raise the queue depth to MVS_QUEUE_SIZE for
 * non-SATA (i.e. SAS) devices.
 */
int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev)) {
		sas_change_queue_depth(sdev,
			MVS_QUEUE_SIZE,
			SCSI_QDEPTH_DEFAULT);
	}
	return 0;
}
310
dd4969a8 311void mvs_scan_start(struct Scsi_Host *shost)
b5762948 312{
20b09c29
AY
313 int i, j;
314 unsigned short core_nr;
315 struct mvs_info *mvi;
316 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
84fbd0ce 317 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
20b09c29
AY
318
319 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
dd4969a8 320
20b09c29
AY
321 for (j = 0; j < core_nr; j++) {
322 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
323 for (i = 0; i < mvi->chip->n_phy; ++i)
324 mvs_bytes_dmaed(mvi, i);
dd4969a8 325 }
84fbd0ce 326 mvs_prv->scan_finished = 1;
b5762948
JG
327}
328
dd4969a8 329int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
b5762948 330{
84fbd0ce
XY
331 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
332 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
333
334 if (mvs_prv->scan_finished == 0)
dd4969a8 335 return 0;
84fbd0ce 336
dd4969a8
JG
337 scsi_flush_work(shost);
338 return 1;
b5762948
JG
339}
340
dd4969a8
JG
/*
 * Build the command slot for an SMP request: DMA-map the request and
 * response scatterlists, lay out the per-slot DMA buffer (OAF, PRD
 * table, status buffer), fill the TX ring entry and command header,
 * and write the open address frame.
 * Returns 0 on success, -ENOMEM on mapping failure, -EINVAL when the
 * request/response lengths are not dword-aligned.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
				struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	/* the command table points straight at the mapped request sg */
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	/* request length excludes the 4-byte CRC (req_len - 4) */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
448
dd4969a8 449static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
8f261aaf 450{
dd4969a8 451 struct ata_queued_cmd *qc = task->uldd_task;
8f261aaf 452
dd4969a8
JG
453 if (qc) {
454 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
455 qc->tf.command == ATA_CMD_FPDMA_READ) {
456 *tag = qc->tag;
457 return 1;
458 }
8f261aaf 459 }
8f261aaf 460
dd4969a8 461 return 0;
8f261aaf
KW
462}
463
dd4969a8
JG
/*
 * Build the command slot for a SATA/STP task: assign a hardware
 * register set, fill the TX ring entry, lay out the per-slot DMA
 * buffer (FIS command table, OAF, PRD table, status buffer), copy in
 * the host-to-device FIS (and ATAPI CDB when applicable) and the open
 * address frame.
 * Returns 0 on success, -EBUSY when no register set is available.
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Have not enough regiset for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	/* TX ring entry: STP command, initiator mode, tag, phy mask
	 * and the register set selected above */
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	/* for device-to-host data the full PRD count is advertised;
	 * NOTE(review): presumably tied to the dma_fix() workaround
	 * below — confirm against chip errata */
	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP. unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	/* reserve space for the chip's full PRD count, not just n_elem */
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA. kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}
595
/*
 * Build the command slot for an SSP command or task-management frame:
 * fill the TX ring entry, lay out the per-slot DMA buffer (SSP frame,
 * OAF, PRD table, status buffer), construct the SSP frame header and
 * the command or TASK IU, and write the open address frame.
 * @is_tmf selects a TASK frame built from @tmf instead of a COMMAND
 * frame. Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
				struct mvs_task_exec_info *tei, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port phy map when the port is wide */
	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		/* COMMAND IU: first-burst bit, task attribute/priority, CDB */
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else{
		/* TASK IU: TMF code plus, for abort/query, the target tag */
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
734
20b09c29 735#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
0b15fb1f
XY
736static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
737 struct mvs_tmf_task *tmf, int *pass)
b5762948 738{
8f261aaf 739 struct domain_device *dev = task->dev;
0b15fb1f 740 struct mvs_device *mvi_dev = dev->lldd_dev;
b5762948 741 struct mvs_task_exec_info tei;
4e52fc0a 742 struct mvs_slot_info *slot;
0b15fb1f
XY
743 u32 tag = 0xdeadbeef, n_elem = 0;
744 int rc = 0;
b5762948 745
20b09c29 746 if (!dev->port) {
0b15fb1f 747 struct task_status_struct *tsm = &task->task_status;
20b09c29
AY
748
749 tsm->resp = SAS_TASK_UNDELIVERED;
750 tsm->stat = SAS_PHY_DOWN;
0b15fb1f
XY
751 /*
752 * libsas will use dev->port, should
753 * not call task_done for sata
754 */
9dc9fd94 755 if (dev->dev_type != SATA_DEV)
0b15fb1f
XY
756 task->task_done(task);
757 return rc;
20b09c29
AY
758 }
759
0b15fb1f
XY
760 if (DEV_IS_GONE(mvi_dev)) {
761 if (mvi_dev)
762 mv_dprintk("device %d not ready.\n",
763 mvi_dev->device_id);
764 else
765 mv_dprintk("device %016llx not ready.\n",
766 SAS_ADDR(dev->sas_addr));
20b09c29
AY
767
768 rc = SAS_PHY_DOWN;
0b15fb1f
XY
769 return rc;
770 }
771 tei.port = dev->port->lldd_port;
772 if (tei.port && !tei.port->port_attached && !tmf) {
773 if (sas_protocol_ata(task->task_proto)) {
774 struct task_status_struct *ts = &task->task_status;
775 mv_dprintk("SATA/STP port %d does not attach"
776 "device.\n", dev->port->id);
777 ts->resp = SAS_TASK_COMPLETE;
778 ts->stat = SAS_PHY_DOWN;
20b09c29 779
0b15fb1f 780 task->task_done(task);
dd4969a8 781
dd4969a8 782 } else {
0b15fb1f
XY
783 struct task_status_struct *ts = &task->task_status;
784 mv_dprintk("SAS port %d does not attach"
785 "device.\n", dev->port->id);
786 ts->resp = SAS_TASK_UNDELIVERED;
787 ts->stat = SAS_PHY_DOWN;
788 task->task_done(task);
dd4969a8 789 }
0b15fb1f
XY
790 return rc;
791 }
dd4969a8 792
0b15fb1f
XY
793 if (!sas_protocol_ata(task->task_proto)) {
794 if (task->num_scatter) {
795 n_elem = dma_map_sg(mvi->dev,
796 task->scatter,
797 task->num_scatter,
798 task->data_dir);
799 if (!n_elem) {
800 rc = -ENOMEM;
801 goto prep_out;
802 }
803 }
804 } else {
805 n_elem = task->num_scatter;
806 }
20b09c29 807
0b15fb1f
XY
808 rc = mvs_tag_alloc(mvi, &tag);
809 if (rc)
810 goto err_out;
20b09c29 811
0b15fb1f 812 slot = &mvi->slot_info[tag];
20b09c29 813
0b15fb1f
XY
814 task->lldd_task = NULL;
815 slot->n_elem = n_elem;
816 slot->slot_tag = tag;
817
818 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
819 if (!slot->buf)
820 goto err_out_tag;
821 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
822
823 tei.task = task;
824 tei.hdr = &mvi->slot[tag];
825 tei.tag = tag;
826 tei.n_elem = n_elem;
827 switch (task->task_proto) {
828 case SAS_PROTOCOL_SMP:
829 rc = mvs_task_prep_smp(mvi, &tei);
830 break;
831 case SAS_PROTOCOL_SSP:
832 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
833 break;
834 case SAS_PROTOCOL_SATA:
835 case SAS_PROTOCOL_STP:
836 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
837 rc = mvs_task_prep_ata(mvi, &tei);
838 break;
839 default:
840 dev_printk(KERN_ERR, mvi->dev,
841 "unknown sas_task proto: 0x%x\n",
842 task->task_proto);
843 rc = -EINVAL;
844 break;
845 }
dd4969a8 846
0b15fb1f
XY
847 if (rc) {
848 mv_dprintk("rc is %x\n", rc);
849 goto err_out_slot_buf;
850 }
851 slot->task = task;
852 slot->port = tei.port;
853 task->lldd_task = slot;
854 list_add_tail(&slot->entry, &tei.port->list);
855 spin_lock(&task->task_state_lock);
856 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
857 spin_unlock(&task->task_state_lock);
858
0b15fb1f
XY
859 mvi_dev->running_req++;
860 ++(*pass);
861 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
9dc9fd94 862
0b15fb1f 863 return rc;
dd4969a8 864
0b15fb1f
XY
865err_out_slot_buf:
866 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
dd4969a8
JG
867err_out_tag:
868 mvs_tag_free(mvi, tag);
869err_out:
20b09c29 870
0b15fb1f
XY
871 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
872 if (!sas_protocol_ata(task->task_proto))
dd4969a8 873 if (n_elem)
0b15fb1f
XY
874 dma_unmap_sg(mvi->dev, task->scatter, n_elem,
875 task->data_dir);
876prep_out:
877 return rc;
878}
879
880static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
881{
882 struct mvs_task_list *first = NULL;
883
884 for (; *num > 0; --*num) {
885 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
886
887 if (!mvs_list)
888 break;
889
890 INIT_LIST_HEAD(&mvs_list->list);
891 if (!first)
892 first = mvs_list;
893 else
894 list_add_tail(&mvs_list->list, &first->list);
895
896 }
897
898 return first;
899}
900
/*
 * Free a whole chain built by mvs_task_alloc_list(). The chain's head
 * node is itself an element (not a dedicated list head), so a local
 * list head is spliced in via __list_add so that every node —
 * including the original head — can be walked and freed safely.
 */
static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}
915
/*
 * Queue a single sas_task on its owning core: prep it under mvi->lock
 * and, if staged, kick hardware delivery of the last TX slot.
 * For SATA devices the libata port lock is dropped around the
 * operation (libsas calls in with it held) and re-taken afterwards.
 */
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	/* release the ap lock held by the libata path to avoid deadlock
	 * with mvi->lock */
	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_unlock_irq(dev->sata_dev.ap->lock);

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	/* tx_prod was already advanced by prep; deliver the slot behind it */
	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_lock_irq(dev->sata_dev.ap->lock);

	return rc;
}
946
/*
 * Queue a linked batch of @num sas_tasks, possibly spanning both
 * controller cores: build a temporary node list, prep each task under
 * its core's lock while counting staged tasks per core in pass[],
 * then kick delivery once per core that received work.
 */
static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		/* n > 0 means not all nodes could be allocated */
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	/* splice a local head in front of the node chain */
	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	/* pair each node with the next task in the caller's task chain */
	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q , list) {

		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	/* one delivery kick per core that staged at least one task */
	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	list_del_init(&q);

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}
1004
20b09c29
AY
1005int mvs_queue_command(struct sas_task *task, const int num,
1006 gfp_t gfp_flags)
1007{
0b15fb1f
XY
1008 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1009 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
1010
1011 if (sas->lldd_max_execute_num < 2)
1012 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1013 else
1014 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
20b09c29
AY
1015}
1016
dd4969a8
JG
1017static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1018{
1019 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1020 mvs_tag_clear(mvi, slot_idx);
1021}
1022
/*
 * Tear down a completed/aborted slot: undo the DMA mappings created at
 * prep time, return the per-slot response buffer to the DMA pool,
 * unlink the slot from its port and task, and release the slot tag.
 */
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;		/* slot already freed */
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP req/resp were mapped as single-entry sg lists */
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;	/* mark the cached tag invalid */
	mvs_slot_free(mvi, slot_idx);
}
1060
84fbd0ce 1061static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
dd4969a8 1062{
84fbd0ce 1063 struct mvs_phy *phy = &mvi->phy[phy_no];
dd4969a8
JG
1064 struct mvs_port *port = phy->port;
1065 int j, no;
1066
20b09c29
AY
1067 for_each_phy(port->wide_port_phymap, j, no) {
1068 if (j & 1) {
1069 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1070 PHYR_WIDE_PORT);
1071 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
dd4969a8
JG
1072 port->wide_port_phymap);
1073 } else {
20b09c29
AY
1074 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1075 PHYR_WIDE_PORT);
1076 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1077 0);
dd4969a8 1078 }
20b09c29 1079 }
dd4969a8
JG
1080}
1081
/*
 * Sample PHY @i's control register and decide whether it is usable.
 *
 * Returns the raw phy-control value when the PHY reports ready and no
 * power-off (POOF) event is pending; returns 0 otherwise, after
 * detaching the phy from its port bookkeeping (shrinking the wide-port
 * map for SAS phys).
 */
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	/* PHY is down: unhook it from its port */
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
1109
/*
 * Read PHY @i's four SATA signature registers into @buf, viewed as four
 * u32 words (SIG0..SIG3 land in s[0]..s[3]).  Returns @buf, or NULL if
 * @buf is NULL.
 */
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	/* Workaround: take some ATAPI devices for ATA */
	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}
1135
1136static u32 mvs_is_sig_fis_received(u32 irq_status)
1137{
1138 return irq_status & PHYEV_SIG_FIS;
1139}
1140
8882f081
XY
1141static void mvs_sig_remove_timer(struct mvs_phy *phy)
1142{
1143 if (phy->timer.function)
1144 del_timer(&phy->timer);
1145 phy->timer.function = NULL;
1146}
1147
20b09c29 1148void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
dd4969a8
JG
1149{
1150 struct mvs_phy *phy = &mvi->phy[i];
20b09c29 1151 struct sas_identify_frame *id;
b5762948 1152
20b09c29 1153 id = (struct sas_identify_frame *)phy->frame_rcvd;
b5762948 1154
dd4969a8 1155 if (get_st) {
20b09c29 1156 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
dd4969a8
JG
1157 phy->phy_status = mvs_is_phy_ready(mvi, i);
1158 }
8f261aaf 1159
dd4969a8 1160 if (phy->phy_status) {
20b09c29
AY
1161 int oob_done = 0;
1162 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
b5762948 1163
20b09c29
AY
1164 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1165
1166 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1167 if (phy->phy_type & PORT_TYPE_SATA) {
1168 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1169 if (mvs_is_sig_fis_received(phy->irq_status)) {
8882f081 1170 mvs_sig_remove_timer(phy);
20b09c29
AY
1171 phy->phy_attached = 1;
1172 phy->att_dev_sas_addr =
1173 i + mvi->id * mvi->chip->n_phy;
1174 if (oob_done)
1175 sas_phy->oob_mode = SATA_OOB_MODE;
1176 phy->frame_rcvd_size =
1177 sizeof(struct dev_to_host_fis);
f9da3be5 1178 mvs_get_d2h_reg(mvi, i, id);
20b09c29
AY
1179 } else {
1180 u32 tmp;
1181 dev_printk(KERN_DEBUG, mvi->dev,
1182 "Phy%d : No sig fis\n", i);
1183 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1184 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1185 tmp | PHYEV_SIG_FIS);
1186 phy->phy_attached = 0;
1187 phy->phy_type &= ~PORT_TYPE_SATA;
20b09c29
AY
1188 goto out_done;
1189 }
9dc9fd94 1190 } else if (phy->phy_type & PORT_TYPE_SAS
20b09c29
AY
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1;
dd4969a8 1193 phy->identify.device_type =
20b09c29 1194 phy->att_dev_info & PORT_DEV_TYPE_MASK;
b5762948 1195
dd4969a8
JG
1196 if (phy->identify.device_type == SAS_END_DEV)
1197 phy->identify.target_port_protocols =
1198 SAS_PROTOCOL_SSP;
1199 else if (phy->identify.device_type != NO_DEVICE)
1200 phy->identify.target_port_protocols =
1201 SAS_PROTOCOL_SMP;
20b09c29 1202 if (oob_done)
dd4969a8
JG
1203 sas_phy->oob_mode = SAS_OOB_MODE;
1204 phy->frame_rcvd_size =
1205 sizeof(struct sas_identify_frame);
dd4969a8 1206 }
20b09c29
AY
1207 memcpy(sas_phy->attached_sas_addr,
1208 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
b5762948 1209
20b09c29
AY
1210 if (MVS_CHIP_DISP->phy_work_around)
1211 MVS_CHIP_DISP->phy_work_around(mvi, i);
dd4969a8 1212 }
84fbd0ce 1213 mv_dprintk("phy %d attach dev info is %x\n",
20b09c29 1214 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
84fbd0ce 1215 mv_dprintk("phy %d attach sas addr is %llx\n",
20b09c29 1216 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
4e52fc0a 1217out_done:
dd4969a8 1218 if (get_st)
20b09c29 1219 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
b5762948
JG
1220}
1221
20b09c29 1222static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
8f261aaf 1223{
dd4969a8 1224 struct sas_ha_struct *sas_ha = sas_phy->ha;
20b09c29 1225 struct mvs_info *mvi = NULL; int i = 0, hi;
dd4969a8 1226 struct mvs_phy *phy = sas_phy->lldd_phy;
20b09c29
AY
1227 struct asd_sas_port *sas_port = sas_phy->port;
1228 struct mvs_port *port;
1229 unsigned long flags = 0;
1230 if (!sas_port)
1231 return;
8f261aaf 1232
20b09c29
AY
1233 while (sas_ha->sas_phy[i]) {
1234 if (sas_ha->sas_phy[i] == sas_phy)
1235 break;
1236 i++;
1237 }
1238 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1239 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
84fbd0ce
XY
1240 if (i >= mvi->chip->n_phy)
1241 port = &mvi->port[i - mvi->chip->n_phy];
20b09c29 1242 else
84fbd0ce 1243 port = &mvi->port[i];
20b09c29
AY
1244 if (lock)
1245 spin_lock_irqsave(&mvi->lock, flags);
dd4969a8
JG
1246 port->port_attached = 1;
1247 phy->port = port;
0b15fb1f 1248 sas_port->lldd_port = port;
dd4969a8
JG
1249 if (phy->phy_type & PORT_TYPE_SAS) {
1250 port->wide_port_phymap = sas_port->phy_mask;
20b09c29 1251 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
dd4969a8 1252 mvs_update_wideport(mvi, sas_phy->id);
8f261aaf 1253 }
20b09c29
AY
1254 if (lock)
1255 spin_unlock_irqrestore(&mvi->lock, flags);
dd4969a8
JG
1256}
1257
20b09c29 1258static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
dd4969a8 1259{
9dc9fd94
S
1260 struct domain_device *dev;
1261 struct mvs_phy *phy = sas_phy->lldd_phy;
1262 struct mvs_info *mvi = phy->mvi;
1263 struct asd_sas_port *port = sas_phy->port;
1264 int phy_no = 0;
1265
1266 while (phy != &mvi->phy[phy_no]) {
1267 phy_no++;
1268 if (phy_no >= MVS_MAX_PHYS)
1269 return;
1270 }
1271 list_for_each_entry(dev, &port->dev_list, dev_list_node)
84fbd0ce 1272 mvs_do_release_task(phy->mvi, phy_no, dev);
9dc9fd94 1273
dd4969a8
JG
1274}
1275
dd4969a8 1276
20b09c29
AY
/* libsas port-formed callback; takes mvi->lock itself (lock = 1). */
void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}
1281
20b09c29 1282void mvs_port_deformed(struct asd_sas_phy *sas_phy)
dd4969a8 1283{
20b09c29
AY
1284 mvs_port_notify_deformed(sas_phy, 1);
1285}
8f261aaf 1286
20b09c29
AY
1287struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1288{
1289 u32 dev;
1290 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1291 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1292 mvi->devices[dev].device_id = dev;
1293 return &mvi->devices[dev];
1294 }
8f261aaf 1295 }
8121ed42 1296
20b09c29
AY
1297 if (dev == MVS_MAX_DEVICES)
1298 mv_printk("max support %d devices, ignore ..\n",
1299 MVS_MAX_DEVICES);
1300
1301 return NULL;
8f261aaf
KW
1302}
1303
20b09c29 1304void mvs_free_dev(struct mvs_device *mvi_dev)
b5762948 1305{
20b09c29
AY
1306 u32 id = mvi_dev->device_id;
1307 memset(mvi_dev, 0, sizeof(*mvi_dev));
1308 mvi_dev->device_id = id;
1309 mvi_dev->dev_type = NO_DEVICE;
1310 mvi_dev->dev_status = MVS_DEV_NORMAL;
1311 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1312}
b5762948 1313
20b09c29
AY
/*
 * Allocate and initialize an mvs_device for a newly discovered libsas
 * domain device.  For devices behind an expander, also record which
 * expander phy they are attached to.  @lock selects whether mvi->lock
 * is taken here.  Returns 0 on success, -1 on failure.
 */
int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		/* find the expander phy this device hangs off */
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}
b5762948 1364
20b09c29
AY
/* libsas dev-found callback; binds an mvs_device, taking mvi->lock. */
int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}
b5762948 1369
9dc9fd94 1370void mvs_dev_gone_notify(struct domain_device *dev)
20b09c29
AY
1371{
1372 unsigned long flags = 0;
f9da3be5 1373 struct mvs_device *mvi_dev = dev->lldd_dev;
9870d9a2 1374 struct mvs_info *mvi = mvi_dev->mvi_info;
b5762948 1375
9dc9fd94 1376 spin_lock_irqsave(&mvi->lock, flags);
b5762948 1377
20b09c29
AY
1378 if (mvi_dev) {
1379 mv_dprintk("found dev[%d:%x] is gone.\n",
1380 mvi_dev->device_id, mvi_dev->dev_type);
9dc9fd94 1381 mvs_release_task(mvi, dev);
20b09c29
AY
1382 mvs_free_reg_set(mvi, mvi_dev);
1383 mvs_free_dev(mvi_dev);
1384 } else {
1385 mv_dprintk("found dev has gone.\n");
b5762948 1386 }
20b09c29 1387 dev->lldd_dev = NULL;
84fbd0ce 1388 mvi_dev->sas_device = NULL;
b5762948 1389
9dc9fd94 1390 spin_unlock_irqrestore(&mvi->lock, flags);
b5762948
JG
1391}
1392
b5762948 1393
20b09c29
AY
/* libsas dev-gone callback. */
void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}
b5762948 1398
20b09c29
AY
1399static struct sas_task *mvs_alloc_task(void)
1400{
1401 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1402
1403 if (task) {
1404 INIT_LIST_HEAD(&task->list);
1405 spin_lock_init(&task->task_state_lock);
1406 task->task_state_flags = SAS_TASK_STATE_PENDING;
1407 init_timer(&task->timer);
1408 init_completion(&task->completion);
b5762948 1409 }
20b09c29 1410 return task;
dd4969a8 1411}
b5762948 1412
20b09c29 1413static void mvs_free_task(struct sas_task *task)
dd4969a8 1414{
20b09c29
AY
1415 if (task) {
1416 BUG_ON(!list_empty(&task->list));
1417 kfree(task);
b5762948 1418 }
20b09c29 1419}
b5762948 1420
20b09c29
AY
/*
 * Internal-task completion callback: signal the waiter only if the
 * watchdog timer was still pending — if del_timer() returns 0 the
 * timeout handler has already claimed (or will claim) the completion.
 */
static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}
b5762948 1427
20b09c29 1428static void mvs_tmf_timedout(unsigned long data)
b5762948 1429{
20b09c29 1430 struct sas_task *task = (struct sas_task *)data;
8f261aaf 1431
20b09c29
AY
1432 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1433 complete(&task->completion);
1434}
8f261aaf 1435
20b09c29
AY
/* XXX */
#define MVS_TASK_TIMEOUT 20
/*
 * Build and issue a driver-internal TMF task and wait for it, retrying
 * up to 3 times on unexpected responses.  @parameter/@para_len is
 * copied into the allocated task's ssp_task.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, the residual byte count
 * on DATA UNDERRUN, or a TMF_RESP_* / negative errno code on failure.
 */
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = mvs_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		/* arm a watchdog so a lost TMF cannot hang us forever */
		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internel task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			/* unexpected response: free this task and retry */
			mv_dprintk(" task to dev %016llx response: 0x%x "
				    "status 0x%x\n",
				    SAS_ADDR(dev->sas_addr),
				    task->task_status.resp,
				    task->task_status.stat);
			mvs_free_task(task);
			task = NULL;

		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	if (task != NULL)
		mvs_free_task(task);
	return res;
}
b5762948 1514
20b09c29
AY
1515static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1516 u8 *lun, struct mvs_tmf_task *tmf)
dd4969a8 1517{
20b09c29 1518 struct sas_ssp_task ssp_task;
20b09c29
AY
1519 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1520 return TMF_RESP_FUNC_ESUPP;
b5762948 1521
84fbd0ce 1522 memcpy(ssp_task.LUN, lun, 8);
b5762948 1523
20b09c29
AY
1524 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1525 sizeof(ssp_task), tmf);
1526}
8f261aaf 1527
8f261aaf 1528
20b09c29
AY
/* Standard mandates link reset for ATA (type 0)
   and hard reset for SSP (type 1) , only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_find_local_phy(dev);
	int reset_type = (dev->dev_type == SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	/* give the link time to come back before callers retry I/O */
	msleep(2000);
	return rc;
}
8f261aaf 1541
20b09c29
AY
/* mandatory SAM-3 */
/*
 * LOGICAL UNIT RESET: issue the LU RESET TMF to @dev/@lun and, if the
 * target completed it, reap every task this driver still has queued
 * for the device.  Returns the TMF status; on failure libsas falls
 * through to an I_T nexus reset.
 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device * mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;	/* device enters error handling */
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall-through I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}
8f261aaf 1564
20b09c29
AY
/*
 * I_T NEXUS RESET: reset the link to @dev (link reset for SATA/STP,
 * hard reset for SSP) and reap all tasks still queued for it.  Skipped
 * (reported complete) unless the device is currently in error
 * handling; clears the EH flag either way.
 */
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	/* housekeeper */
	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}
/* optional SAM-3 */
/*
 * QUERY TASK TMF: ask the target whether the tagged SSP task is still
 * present in its LUN.  Non-SSP or untagged tasks fail with
 * TMF_RESP_FUNC_FAILED; otherwise the TMF result is returned as-is.
 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;	/* no tag: cannot query */
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
1624
20b09c29
AY
/* mandatory SAM-3, still need free task/slot info */
/*
 * ABORT TASK handler.  For SSP, issue an ABORT TASK TMF and, on
 * success, force-complete the local slot.  For SATA/STP there is no
 * per-task TMF here: the waiter is woken as if the task timed out and
 * the slot is freed directly.  Returns a TMF_RESP_* code.
 */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;	/* already finished; nothing to abort */
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards.*/
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		/* to do free register_set */
		if (SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			/* wake any waiter as if the task timed out */
			mvs_tmf_timedout((unsigned long)task);
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
1703
20b09c29 1704int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
8f261aaf 1705{
20b09c29
AY
1706 int rc = TMF_RESP_FUNC_FAILED;
1707 struct mvs_tmf_task tmf_task;
8f261aaf 1708
20b09c29
AY
1709 tmf_task.tmf = TMF_ABORT_TASK_SET;
1710 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
dd4969a8 1711
20b09c29 1712 return rc;
8f261aaf
KW
1713}
1714
20b09c29 1715int mvs_clear_aca(struct domain_device *dev, u8 *lun)
8f261aaf 1716{
20b09c29
AY
1717 int rc = TMF_RESP_FUNC_FAILED;
1718 struct mvs_tmf_task tmf_task;
8f261aaf 1719
20b09c29
AY
1720 tmf_task.tmf = TMF_CLEAR_ACA;
1721 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
8f261aaf 1722
20b09c29
AY
1723 return rc;
1724}
8f261aaf 1725
20b09c29
AY
1726int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1727{
1728 int rc = TMF_RESP_FUNC_FAILED;
1729 struct mvs_tmf_task tmf_task;
8f261aaf 1730
20b09c29
AY
1731 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1732 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
8f261aaf 1733
20b09c29 1734 return rc;
dd4969a8 1735}
8f261aaf 1736
20b09c29
AY
/*
 * Finish a SATA/STP command: copy the received D2H FIS from the
 * device's taskfile set into the task's response buffer and map the
 * hardware error word @err to a SAS status.  Returns the resulting
 * status (SAM_STAT_GOOD when @err is 0).
 */
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;


	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		/* a stopped STP issue maps to OPEN REJECT */
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}
1760
a4632aae
XY
1761void mvs_set_sense(u8 *buffer, int len, int d_sense,
1762 int key, int asc, int ascq)
1763{
1764 memset(buffer, 0, len);
1765
1766 if (d_sense) {
1767 /* Descriptor format */
1768 if (len < 4) {
1769 mv_printk("Length %d of sense buffer too small to "
1770 "fit sense %x:%x:%x", len, key, asc, ascq);
1771 }
1772
1773 buffer[0] = 0x72; /* Response Code */
1774 if (len > 1)
1775 buffer[1] = key; /* Sense Key */
1776 if (len > 2)
1777 buffer[2] = asc; /* ASC */
1778 if (len > 3)
1779 buffer[3] = ascq; /* ASCQ */
1780 } else {
1781 if (len < 14) {
1782 mv_printk("Length %d of sense buffer too small to "
1783 "fit sense %x:%x:%x", len, key, asc, ascq);
1784 }
1785
1786 buffer[0] = 0x70; /* Response Code */
1787 if (len > 2)
1788 buffer[2] = key; /* Sense Key */
1789 if (len > 7)
1790 buffer[7] = 0x0a; /* Additional Sense Length */
1791 if (len > 12)
1792 buffer[12] = asc; /* ASC */
1793 if (len > 13)
1794 buffer[13] = ascq; /* ASCQ */
1795 }
1796
1797 return;
1798}
1799
/*
 * Build a minimal SSP response IU carrying 17 bytes of fixed-format
 * autosense data for @key/@asc/@asc_q with CHECK CONDITION status.
 */
void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
				u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;	/* DATAPRES: sense data present */
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 02;	/* octal 2 == SAM CHECK CONDITION */
	mvs_set_sense(iu->sense_data, 17, 0,
			key, asc, asc_q);
}
1810
20b09c29
AY
/*
 * Decode a slot's error-information record into a SAS/SAM status.
 * Also stops a stalled STP issue (CMD_ISS_STPD) and re-activates the
 * command register for the slot.  For an SSP NO_DEST (or retry-flag)
 * error, a synthetic NOT READY sense response is built so the midlayer
 * retries the command.
 */
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		stat = SAS_ABORTED_TASK;
		if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
			sas_ssp_task_response(mvi->dev, task, iu);
			stat = SAM_STAT_CHECK_CONDITION;
		}
		if (err_dw1 & bit(31))
			mv_printk("reuse same slot, retry command.\n");
		break;
	}
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		/* NCQ is abandoned after an error on the link */
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}
1861
20b09c29 1862int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
b5762948 1863{
20b09c29
AY
1864 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1865 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1866 struct sas_task *task = slot->task;
1867 struct mvs_device *mvi_dev = NULL;
1868 struct task_status_struct *tstat;
9dc9fd94
S
1869 struct domain_device *dev;
1870 u32 aborted;
20b09c29 1871
20b09c29
AY
1872 void *to;
1873 enum exec_status sts;
1874
9dc9fd94 1875 if (unlikely(!task || !task->lldd_task || !task->dev))
20b09c29
AY
1876 return -1;
1877
1878 tstat = &task->task_status;
9dc9fd94
S
1879 dev = task->dev;
1880 mvi_dev = dev->lldd_dev;
b5762948 1881
20b09c29
AY
1882 spin_lock(&task->task_state_lock);
1883 task->task_state_flags &=
1884 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1885 task->task_state_flags |= SAS_TASK_STATE_DONE;
1886 /* race condition*/
1887 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1888 spin_unlock(&task->task_state_lock);
1889
1890 memset(tstat, 0, sizeof(*tstat));
1891 tstat->resp = SAS_TASK_COMPLETE;
1892
1893 if (unlikely(aborted)) {
1894 tstat->stat = SAS_ABORTED_TASK;
9dc9fd94
S
1895 if (mvi_dev && mvi_dev->running_req)
1896 mvi_dev->running_req--;
20b09c29
AY
1897 if (sas_protocol_ata(task->task_proto))
1898 mvs_free_reg_set(mvi, mvi_dev);
1899
1900 mvs_slot_task_free(mvi, task, slot, slot_idx);
1901 return -1;
b5762948
JG
1902 }
1903
9dc9fd94
S
1904 if (unlikely(!mvi_dev || flags)) {
1905 if (!mvi_dev)
1906 mv_dprintk("port has not device.\n");
20b09c29
AY
1907 tstat->stat = SAS_PHY_DOWN;
1908 goto out;
1909 }
b5762948 1910
20b09c29
AY
1911 /* error info record present */
1912 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
84fbd0ce
XY
1913 mv_dprintk("port %d slot %d rx_desc %X has error info"
1914 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1915 rx_desc, (u64)(*(u64 *)slot->response));
20b09c29 1916 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
9dc9fd94 1917 tstat->resp = SAS_TASK_COMPLETE;
20b09c29 1918 goto out;
b5762948
JG
1919 }
1920
20b09c29
AY
1921 switch (task->task_proto) {
1922 case SAS_PROTOCOL_SSP:
1923 /* hw says status == 0, datapres == 0 */
1924 if (rx_desc & RXQ_GOOD) {
df64d3ca 1925 tstat->stat = SAM_STAT_GOOD;
20b09c29
AY
1926 tstat->resp = SAS_TASK_COMPLETE;
1927 }
1928 /* response frame present */
1929 else if (rx_desc & RXQ_RSP) {
1930 struct ssp_response_iu *iu = slot->response +
1931 sizeof(struct mvs_err_info);
1932 sas_ssp_task_response(mvi->dev, task, iu);
1933 } else
df64d3ca 1934 tstat->stat = SAM_STAT_CHECK_CONDITION;
20b09c29 1935 break;
b5762948 1936
20b09c29
AY
1937 case SAS_PROTOCOL_SMP: {
1938 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
df64d3ca 1939 tstat->stat = SAM_STAT_GOOD;
20b09c29
AY
1940 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1941 memcpy(to + sg_resp->offset,
1942 slot->response + sizeof(struct mvs_err_info),
1943 sg_dma_len(sg_resp));
1944 kunmap_atomic(to, KM_IRQ0);
1945 break;
1946 }
8f261aaf 1947
20b09c29
AY
1948 case SAS_PROTOCOL_SATA:
1949 case SAS_PROTOCOL_STP:
1950 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1951 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1952 break;
1953 }
b5762948 1954
20b09c29 1955 default:
df64d3ca 1956 tstat->stat = SAM_STAT_CHECK_CONDITION;
20b09c29
AY
1957 break;
1958 }
9dc9fd94
S
1959 if (!slot->port->port_attached) {
1960 mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
1961 tstat->stat = SAS_PHY_DOWN;
1962 }
1963
b5762948 1964
20b09c29 1965out:
9dc9fd94
S
1966 if (mvi_dev && mvi_dev->running_req) {
1967 mvi_dev->running_req--;
1968 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
0f980a87
AY
1969 mvs_free_reg_set(mvi, mvi_dev);
1970 }
20b09c29
AY
1971 mvs_slot_task_free(mvi, task, slot, slot_idx);
1972 sts = tstat->stat;
8f261aaf 1973
20b09c29
AY
1974 spin_unlock(&mvi->lock);
1975 if (task->task_done)
1976 task->task_done(task);
84fbd0ce 1977
20b09c29 1978 spin_lock(&mvi->lock);
b5762948 1979
20b09c29
AY
1980 return sts;
1981}
b5762948 1982
9dc9fd94 1983void mvs_do_release_task(struct mvs_info *mvi,
20b09c29
AY
1984 int phy_no, struct domain_device *dev)
1985{
9dc9fd94 1986 u32 slot_idx;
20b09c29
AY
1987 struct mvs_phy *phy;
1988 struct mvs_port *port;
1989 struct mvs_slot_info *slot, *slot2;
b5762948 1990
20b09c29
AY
1991 phy = &mvi->phy[phy_no];
1992 port = phy->port;
1993 if (!port)
1994 return;
9dc9fd94
S
1995 /* clean cmpl queue in case request is already finished */
1996 mvs_int_rx(mvi, false);
1997
1998
b5762948 1999
20b09c29
AY
2000 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
2001 struct sas_task *task;
2002 slot_idx = (u32) (slot - mvi->slot_info);
2003 task = slot->task;
b5762948 2004
20b09c29
AY
2005 if (dev && task->dev != dev)
2006 continue;
8f261aaf 2007
20b09c29
AY
2008 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
2009 slot_idx, slot->slot_tag, task);
9dc9fd94 2010 MVS_CHIP_DISP->command_active(mvi, slot_idx);
b5762948 2011
20b09c29 2012 mvs_slot_complete(mvi, slot_idx, 1);
b5762948 2013 }
20b09c29 2014}
b5762948 2015
9dc9fd94
S
2016void mvs_release_task(struct mvs_info *mvi,
2017 struct domain_device *dev)
2018{
2019 int i, phyno[WIDE_PORT_MAX_PHY], num;
2020 /* housekeeper */
2021 num = mvs_find_dev_phyno(dev, phyno);
2022 for (i = 0; i < num; i++)
2023 mvs_do_release_task(mvi, phyno[i], dev);
2024}
2025
20b09c29
AY
2026static void mvs_phy_disconnected(struct mvs_phy *phy)
2027{
2028 phy->phy_attached = 0;
2029 phy->att_dev_info = 0;
2030 phy->att_dev_sas_addr = 0;
2031}
2032
2033static void mvs_work_queue(struct work_struct *work)
2034{
2035 struct delayed_work *dw = container_of(work, struct delayed_work, work);
2036 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2037 struct mvs_info *mvi = mwq->mvi;
2038 unsigned long flags;
a4632aae
XY
2039 u32 phy_no = (unsigned long) mwq->data;
2040 struct sas_ha_struct *sas_ha = mvi->sas;
2041 struct mvs_phy *phy = &mvi->phy[phy_no];
2042 struct asd_sas_phy *sas_phy = &phy->sas_phy;
b5762948 2043
20b09c29
AY
2044 spin_lock_irqsave(&mvi->lock, flags);
2045 if (mwq->handler & PHY_PLUG_EVENT) {
20b09c29
AY
2046
2047 if (phy->phy_event & PHY_PLUG_OUT) {
2048 u32 tmp;
2049 struct sas_identify_frame *id;
2050 id = (struct sas_identify_frame *)phy->frame_rcvd;
2051 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
2052 phy->phy_event &= ~PHY_PLUG_OUT;
2053 if (!(tmp & PHY_READY_MASK)) {
2054 sas_phy_disconnected(sas_phy);
2055 mvs_phy_disconnected(phy);
2056 sas_ha->notify_phy_event(sas_phy,
2057 PHYE_LOSS_OF_SIGNAL);
2058 mv_dprintk("phy%d Removed Device\n", phy_no);
2059 } else {
2060 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2061 mvs_update_phyinfo(mvi, phy_no, 1);
2062 mvs_bytes_dmaed(mvi, phy_no);
2063 mvs_port_notify_formed(sas_phy, 0);
2064 mv_dprintk("phy%d Attached Device\n", phy_no);
2065 }
2066 }
a4632aae
XY
2067 } else if (mwq->handler & EXP_BRCT_CHG) {
2068 phy->phy_event &= ~EXP_BRCT_CHG;
2069 sas_ha->notify_port_event(sas_phy,
2070 PORTE_BROADCAST_RCVD);
2071 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
20b09c29
AY
2072 }
2073 list_del(&mwq->entry);
2074 spin_unlock_irqrestore(&mvi->lock, flags);
2075 kfree(mwq);
2076}
8f261aaf 2077
20b09c29
AY
2078static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
2079{
2080 struct mvs_wq *mwq;
2081 int ret = 0;
2082
2083 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
2084 if (mwq) {
2085 mwq->mvi = mvi;
2086 mwq->data = data;
2087 mwq->handler = handler;
2088 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
2089 list_add_tail(&mwq->entry, &mvi->wq_list);
2090 schedule_delayed_work(&mwq->work_q, HZ * 2);
2091 } else
2092 ret = -ENOMEM;
2093
2094 return ret;
2095}
b5762948 2096
20b09c29
AY
2097static void mvs_sig_time_out(unsigned long tphy)
2098{
2099 struct mvs_phy *phy = (struct mvs_phy *)tphy;
2100 struct mvs_info *mvi = phy->mvi;
2101 u8 phy_no;
2102
2103 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2104 if (&mvi->phy[phy_no] == phy) {
2105 mv_dprintk("Get signature time out, reset phy %d\n",
2106 phy_no+mvi->id*mvi->chip->n_phy);
a4632aae 2107 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
20b09c29 2108 }
b5762948 2109 }
20b09c29 2110}
b5762948 2111
20b09c29
AY
/*
 * mvs_int_port - per-port interrupt service
 * @mvi:    controller instance
 * @phy_no: phy index whose port interrupt fired
 * @events: NOTE(review): not referenced in this body; the per-port cause
 *          is re-read from the chip's port irq status register instead
 *
 * Reads and acknowledges the port interrupt status, then dispatches:
 *  - PHYEV_DCDR_ERR:  log only (STP decoding error)
 *  - PHYEV_POOF:      cable pull — release outstanding tasks, mark
 *                     PHY_PLUG_OUT, queue deferred plug handling; may
 *                     reset the phy and return early
 *  - PHYEV_COMWAKE:   SATA wakeup — unmask SIG_FIS and arm a one-shot
 *                     signature timeout timer (only if not already armed)
 *  - PHYEV_SIG_FIS / PHYEV_ID_DONE: device plug-in — detect port type,
 *    update phy info, notify libsas of transferred identify/signature data
 *  - PHYEV_BROAD_CH:  expander broadcast — defer to the work queue
 */
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	/* read then write back: ack the sources we are about to handle */
	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * events is port event now ,
	 * we need check the interrupt status which belongs to per port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
		phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		/* debounce: give the link time to settle before deciding */
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			/* reclaim every slot this port will never complete */
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			/* defer the attach/detach decision to process context */
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				/* prefer chip-specific STP reset when available */
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		/* SATA device woke the link: watch for its signature FIS */
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		/* arm the signature timeout only if no timer is pending */
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				/* signature received; stop watching for it */
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}
2205
20b09c29 2206int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
b5762948 2207{
20b09c29
AY
2208 u32 rx_prod_idx, rx_desc;
2209 bool attn = false;
b5762948 2210
20b09c29
AY
2211 /* the first dword in the RX ring is special: it contains
2212 * a mirror of the hardware's RX producer index, so that
2213 * we don't have to stall the CPU reading that register.
2214 * The actual RX ring is offset by one dword, due to this.
2215 */
2216 rx_prod_idx = mvi->rx_cons;
2217 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2218 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2219 return 0;
b5762948 2220
20b09c29
AY
2221 /* The CMPL_Q may come late, read from register and try again
2222 * note: if coalescing is enabled,
2223 * it will need to read from register every time for sure
2224 */
2225 if (unlikely(mvi->rx_cons == rx_prod_idx))
2226 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2227
2228 if (mvi->rx_cons == rx_prod_idx)
2229 return 0;
2230
2231 while (mvi->rx_cons != rx_prod_idx) {
2232 /* increment our internal RX consumer pointer */
2233 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2234 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2235
2236 if (likely(rx_desc & RXQ_DONE))
2237 mvs_slot_complete(mvi, rx_desc, 0);
2238 if (rx_desc & RXQ_ATTN) {
2239 attn = true;
2240 } else if (rx_desc & RXQ_ERR) {
2241 if (!(rx_desc & RXQ_DONE))
2242 mvs_slot_complete(mvi, rx_desc, 0);
2243 } else if (rx_desc & RXQ_SLOT_RESET) {
2244 mvs_slot_free(mvi, rx_desc);
2245 }
2246 }
2247
2248 if (attn && self_clear)
2249 MVS_CHIP_DISP->int_full(mvi);
2250 return 0;
b5762948
JG
2251}
2252
This page took 0.458093 seconds and 5 git commands to generate.