isci: exposing user parameters via module params
[deliverable/linux.git] / drivers / scsi / isci / host.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include "isci.h"
57 #include "scic_io_request.h"
58 #include "scic_remote_device.h"
59 #include "scic_port.h"
60
61 #include "port.h"
62 #include "request.h"
63 #include "host.h"
64 #include "probe_roms.h"
65
66 irqreturn_t isci_msix_isr(int vec, void *data)
67 {
68 struct isci_host *ihost = data;
69 struct scic_sds_controller *scic = ihost->core_controller;
70
71 if (scic_sds_controller_isr(scic))
72 tasklet_schedule(&ihost->completion_tasklet);
73
74 return IRQ_HANDLED;
75 }
76
77 irqreturn_t isci_intx_isr(int vec, void *data)
78 {
79 struct pci_dev *pdev = data;
80 struct isci_host *ihost;
81 irqreturn_t ret = IRQ_NONE;
82 int i;
83
84 for_each_isci_host(i, ihost, pdev) {
85 struct scic_sds_controller *scic = ihost->core_controller;
86
87 if (scic_sds_controller_isr(scic)) {
88 tasklet_schedule(&ihost->completion_tasklet);
89 ret = IRQ_HANDLED;
90 } else if (scic_sds_controller_error_isr(scic)) {
91 spin_lock(&ihost->scic_lock);
92 scic_sds_controller_error_handler(scic);
93 spin_unlock(&ihost->scic_lock);
94 ret = IRQ_HANDLED;
95 }
96 }
97
98 return ret;
99 }
100
101 irqreturn_t isci_error_isr(int vec, void *data)
102 {
103 struct isci_host *ihost = data;
104 struct scic_sds_controller *scic = ihost->core_controller;
105
106 if (scic_sds_controller_error_isr(scic))
107 scic_sds_controller_error_handler(scic);
108
109 return IRQ_HANDLED;
110 }
111
112 /**
113 * isci_host_start_complete() - This function is called by the core library,
114 * through the ISCI Module, to indicate controller start status.
115 * @isci_host: This parameter specifies the ISCI host object
116 * @completion_status: This parameter specifies the completion status from the
117 * core library.
118 *
119 */
120 void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
121 {
122 if (completion_status != SCI_SUCCESS)
123 dev_info(&ihost->pdev->dev,
124 "controller start timed out, continuing...\n");
125 isci_host_change_state(ihost, isci_ready);
126 clear_bit(IHOST_START_PENDING, &ihost->flags);
127 wake_up(&ihost->eventq);
128 }
129
130 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
131 {
132 struct isci_host *ihost = isci_host_from_sas_ha(SHOST_TO_SAS_HA(shost));
133
134 if (test_bit(IHOST_START_PENDING, &ihost->flags))
135 return 0;
136
137 /* todo: use sas_flush_discovery once it is upstream */
138 scsi_flush_work(shost);
139
140 scsi_flush_work(shost);
141
142 dev_dbg(&ihost->pdev->dev,
143 "%s: ihost->status = %d, time = %ld\n",
144 __func__, isci_host_get_state(ihost), time);
145
146 return 1;
147
148 }
149
150 void isci_host_scan_start(struct Scsi_Host *shost)
151 {
152 struct isci_host *ihost = isci_host_from_sas_ha(SHOST_TO_SAS_HA(shost));
153 struct scic_sds_controller *scic = ihost->core_controller;
154 unsigned long tmo = scic_controller_get_suggested_start_timeout(scic);
155
156 set_bit(IHOST_START_PENDING, &ihost->flags);
157
158 spin_lock_irq(&ihost->scic_lock);
159 scic_controller_start(scic, tmo);
160 scic_controller_enable_interrupts(scic);
161 spin_unlock_irq(&ihost->scic_lock);
162 }
163
/**
 * isci_host_stop_complete() - core-library callback signalling that a
 *	controller stop request has finished.
 * @ihost: host whose controller stop completed
 * @completion_status: stop status from the core library
 *
 * NOTE(review): @completion_status is currently ignored — teardown
 * proceeds identically on failure; confirm that is intentional.
 */
void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(ihost->core_controller);
	/* Clear the pending bit before waking so waiters observe it. */
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}
171
172 static struct coherent_memory_info *isci_host_alloc_mdl_struct(
173 struct isci_host *isci_host,
174 u32 size)
175 {
176 struct coherent_memory_info *mdl_struct;
177 void *uncached_address = NULL;
178
179
180 mdl_struct = devm_kzalloc(&isci_host->pdev->dev,
181 sizeof(*mdl_struct),
182 GFP_KERNEL);
183 if (!mdl_struct)
184 return NULL;
185
186 INIT_LIST_HEAD(&mdl_struct->node);
187
188 uncached_address = dmam_alloc_coherent(&isci_host->pdev->dev,
189 size,
190 &mdl_struct->dma_handle,
191 GFP_KERNEL);
192 if (!uncached_address)
193 return NULL;
194
195 /* memset the whole memory area. */
196 memset((char *)uncached_address, 0, size);
197 mdl_struct->vaddr = uncached_address;
198 mdl_struct->size = (size_t)size;
199
200 return mdl_struct;
201 }
202
203 static void isci_host_build_mde(
204 struct sci_physical_memory_descriptor *mde_struct,
205 struct coherent_memory_info *mdl_struct)
206 {
207 unsigned long address = 0;
208 dma_addr_t dma_addr = 0;
209
210 address = (unsigned long)mdl_struct->vaddr;
211 dma_addr = mdl_struct->dma_handle;
212
213 /* to satisfy the alignment. */
214 if ((address % mde_struct->constant_memory_alignment) != 0) {
215 int align_offset
216 = (mde_struct->constant_memory_alignment
217 - (address % mde_struct->constant_memory_alignment));
218 address += align_offset;
219 dma_addr += align_offset;
220 }
221
222 mde_struct->virtual_address = (void *)address;
223 mde_struct->physical_address = dma_addr;
224 mdl_struct->mde = mde_struct;
225 }
226
227 static int isci_host_mdl_allocate_coherent(
228 struct isci_host *isci_host)
229 {
230 struct sci_physical_memory_descriptor *current_mde;
231 struct coherent_memory_info *mdl_struct;
232 u32 size = 0;
233
234 struct sci_base_memory_descriptor_list *mdl_handle
235 = sci_controller_get_memory_descriptor_list_handle(
236 isci_host->core_controller);
237
238 sci_mdl_first_entry(mdl_handle);
239
240 current_mde = sci_mdl_get_current_entry(mdl_handle);
241
242 while (current_mde != NULL) {
243
244 size = (current_mde->constant_memory_size
245 + current_mde->constant_memory_alignment);
246
247 mdl_struct = isci_host_alloc_mdl_struct(isci_host, size);
248 if (!mdl_struct)
249 return -ENOMEM;
250
251 list_add_tail(&mdl_struct->node, &isci_host->mdl_struct_list);
252
253 isci_host_build_mde(current_mde, mdl_struct);
254
255 sci_mdl_next_entry(mdl_handle);
256 current_mde = sci_mdl_get_current_entry(mdl_handle);
257 }
258
259 return 0;
260 }
261
262
/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 * Completed requests are returned to libsas via task_done() and freed;
 * errored requests are pushed into the libsas abort path (or freed
 * directly when no sas_task is still attached).
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	/* Run the core completion handler and detach both per-host lists
	 * under the lock, so they can be walked below without holding it.
	 */
	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(isci_host->core_controller);

	/* Take the lists of completed I/Os from the host. */

	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request. Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}

}
372
373 void isci_host_deinit(struct isci_host *ihost)
374 {
375 struct scic_sds_controller *scic = ihost->core_controller;
376 int i;
377
378 isci_host_change_state(ihost, isci_stopping);
379 for (i = 0; i < SCI_MAX_PORTS; i++) {
380 struct isci_port *port = &ihost->isci_ports[i];
381 struct isci_remote_device *idev, *d;
382
383 list_for_each_entry_safe(idev, d, &port->remote_dev_list, node) {
384 isci_remote_device_change_state(idev, isci_stopping);
385 isci_remote_device_stop(ihost, idev);
386 }
387 }
388
389 set_bit(IHOST_STOP_PENDING, &ihost->flags);
390
391 spin_lock_irq(&ihost->scic_lock);
392 scic_controller_stop(scic, SCIC_CONTROLLER_STOP_TIMEOUT);
393 spin_unlock_irq(&ihost->scic_lock);
394
395 wait_for_stop(ihost);
396 scic_controller_reset(scic);
397 isci_timer_list_destroy(ihost);
398 }
399
400 static void __iomem *scu_base(struct isci_host *isci_host)
401 {
402 struct pci_dev *pdev = isci_host->pdev;
403 int id = isci_host->id;
404
405 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
406 }
407
408 static void __iomem *smu_base(struct isci_host *isci_host)
409 {
410 struct pci_dev *pdev = isci_host->pdev;
411 int id = isci_host->id;
412
413 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
414 }
415
416 static void isci_user_parameters_get(
417 struct isci_host *isci_host,
418 union scic_user_parameters *scic_user_params)
419 {
420 struct scic_sds_user_parameters *u = &scic_user_params->sds1;
421 int i;
422
423 for (i = 0; i < SCI_MAX_PHYS; i++) {
424 struct sci_phy_user_params *u_phy = &u->phys[i];
425
426 u_phy->max_speed_generation = phy_gen;
427
428 /* we are not exporting these for now */
429 u_phy->align_insertion_frequency = 0x7f;
430 u_phy->in_connection_align_insertion_frequency = 0xff;
431 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
432 }
433
434 u->stp_inactivity_timeout = stp_inactive_to;
435 u->ssp_inactivity_timeout = ssp_inactive_to;
436 u->stp_max_occupancy_timeout = stp_max_occ_to;
437 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
438 u->no_outbound_task_timeout = no_outbound_task_to;
439 u->max_number_concurrent_device_spin_up = max_concurr_spinup;
440 }
441
442 int isci_host_init(struct isci_host *isci_host)
443 {
444 int err = 0, i;
445 enum sci_status status;
446 struct scic_sds_controller *controller;
447 union scic_oem_parameters oem;
448 union scic_user_parameters scic_user_params;
449 struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);
450
451 isci_timer_list_construct(isci_host);
452
453 controller = scic_controller_alloc(&isci_host->pdev->dev);
454
455 if (!controller) {
456 dev_err(&isci_host->pdev->dev,
457 "%s: failed (%d)\n",
458 __func__,
459 err);
460 return -ENOMEM;
461 }
462
463 isci_host->core_controller = controller;
464 sci_object_set_association(isci_host->core_controller, isci_host);
465 spin_lock_init(&isci_host->state_lock);
466 spin_lock_init(&isci_host->scic_lock);
467 spin_lock_init(&isci_host->queue_lock);
468 init_waitqueue_head(&isci_host->eventq);
469
470 isci_host_change_state(isci_host, isci_starting);
471 isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
472
473 status = scic_controller_construct(controller, scu_base(isci_host),
474 smu_base(isci_host));
475
476 if (status != SCI_SUCCESS) {
477 dev_err(&isci_host->pdev->dev,
478 "%s: scic_controller_construct failed - status = %x\n",
479 __func__,
480 status);
481 return -ENODEV;
482 }
483
484 isci_host->sas_ha.dev = &isci_host->pdev->dev;
485 isci_host->sas_ha.lldd_ha = isci_host;
486
487 /*
488 * grab initial values stored in the controller object for OEM and USER
489 * parameters
490 */
491 isci_user_parameters_get(isci_host, &scic_user_params);
492 status = scic_user_parameters_set(isci_host->core_controller,
493 &scic_user_params);
494 if (status != SCI_SUCCESS) {
495 dev_warn(&isci_host->pdev->dev,
496 "%s: scic_user_parameters_set failed\n",
497 __func__);
498 return -ENODEV;
499 }
500
501 scic_oem_parameters_get(controller, &oem);
502
503 /* grab any OEM parameters specified in orom */
504 if (pci_info->orom) {
505 status = isci_parse_oem_parameters(&oem,
506 pci_info->orom,
507 isci_host->id);
508 if (status != SCI_SUCCESS) {
509 dev_warn(&isci_host->pdev->dev,
510 "parsing firmware oem parameters failed\n");
511 return -EINVAL;
512 }
513 }
514
515 status = scic_oem_parameters_set(isci_host->core_controller, &oem);
516 if (status != SCI_SUCCESS) {
517 dev_warn(&isci_host->pdev->dev,
518 "%s: scic_oem_parameters_set failed\n",
519 __func__);
520 return -ENODEV;
521 }
522
523 tasklet_init(&isci_host->completion_tasklet,
524 isci_host_completion_routine, (unsigned long)isci_host);
525
526 INIT_LIST_HEAD(&(isci_host->mdl_struct_list));
527
528 INIT_LIST_HEAD(&isci_host->requests_to_complete);
529 INIT_LIST_HEAD(&isci_host->requests_to_errorback);
530
531 spin_lock_irq(&isci_host->scic_lock);
532 status = scic_controller_initialize(isci_host->core_controller);
533 spin_unlock_irq(&isci_host->scic_lock);
534 if (status != SCI_SUCCESS) {
535 dev_warn(&isci_host->pdev->dev,
536 "%s: scic_controller_initialize failed -"
537 " status = 0x%x\n",
538 __func__, status);
539 return -ENODEV;
540 }
541
542 /* populate mdl with dma memory. scu_mdl_allocate_coherent() */
543 err = isci_host_mdl_allocate_coherent(isci_host);
544 if (err)
545 return err;
546
547 /*
548 * keep the pool alloc size around, will use it for a bounds checking
549 * when trying to convert virtual addresses to physical addresses
550 */
551 isci_host->dma_pool_alloc_size = sizeof(struct isci_request) +
552 scic_io_request_get_object_size();
553 isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
554 isci_host->dma_pool_alloc_size,
555 SLAB_HWCACHE_ALIGN, 0);
556
557 if (!isci_host->dma_pool)
558 return -ENOMEM;
559
560 for (i = 0; i < SCI_MAX_PORTS; i++)
561 isci_port_init(&isci_host->isci_ports[i], isci_host, i);
562
563 for (i = 0; i < SCI_MAX_PHYS; i++)
564 isci_phy_init(&isci_host->phys[i], isci_host, i);
565
566 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
567 struct isci_remote_device *idev = idev_by_id(isci_host, i);
568
569 INIT_LIST_HEAD(&idev->reqs_in_process);
570 INIT_LIST_HEAD(&idev->node);
571 spin_lock_init(&idev->state_lock);
572 }
573
574 return 0;
575 }
This page took 0.064202 seconds and 6 git commands to generate.