isci: fix "no outbound task timeout" default value
[deliverable/linux.git] / drivers / scsi / isci / core / scic_sds_controller.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <linux/device.h>
57 #include "scic_controller.h"
58 #include "scic_phy.h"
59 #include "scic_port.h"
60 #include "scic_remote_device.h"
61 #include "scic_sds_controller.h"
62 #include "scic_sds_controller_registers.h"
63 #include "scic_sds_pci.h"
64 #include "scic_sds_phy.h"
65 #include "scic_sds_port_configuration_agent.h"
66 #include "scic_sds_port.h"
67 #include "scic_sds_remote_device.h"
68 #include "scic_sds_request.h"
69 #include "scic_user_callback.h"
70 #include "sci_environment.h"
71 #include "sci_util.h"
72 #include "scu_completion_codes.h"
73 #include "scu_constants.h"
74 #include "scu_event_codes.h"
75 #include "scu_remote_node_context.h"
76 #include "scu_task_context.h"
77 #include "scu_unsolicited_frame.h"
78
79 #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
80
81 /**
82 * smu_dcc_get_max_ports() -
83 *
84 * This macro returns the maximum number of logical ports supported by the
85 * hardware. The caller passes in the value read from the device context
86 * capacity register and this macro will mash and shift the value appropriately.
87 */
88 #define smu_dcc_get_max_ports(dcc_value) \
89 (\
90 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
91 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
92 )
93
94 /**
95 * smu_dcc_get_max_task_context() -
96 *
97 * This macro returns the maximum number of task contexts supported by the
98 * hardware. The caller passes in the value read from the device context
99 * capacity register and this macro will mash and shift the value appropriately.
100 */
101 #define smu_dcc_get_max_task_context(dcc_value) \
102 (\
103 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
104 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
105 )
106
107 /**
108 * smu_dcc_get_max_remote_node_context() -
109 *
110 * This macro returns the maximum number of remote node contexts supported by
111 * the hardware. The caller passes in the value read from the device context
112 * capacity register and this macro will mash and shift the value appropriately.
113 */
114 #define smu_dcc_get_max_remote_node_context(dcc_value) \
115 (\
116 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
117 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
118 )
119
120
121 static void scic_sds_controller_power_control_timer_handler(
122 void *controller);
123 #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT 3
124 #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT 3
125
126 /**
127 *
128 *
129 * The number of milliseconds to wait for a phy to start.
130 */
131 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
132
133 /**
134 *
135 *
136 * The number of milliseconds to wait while a given phy is consuming power
137 * before allowing another set of phys to consume power. Ultimately, this will
138 * be specified by OEM parameter.
139 */
140 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
141
142 /**
143 * COMPLETION_QUEUE_CYCLE_BIT() -
144 *
145 * This macro will return the cycle bit of the completion queue entry
146 */
147 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
148
149 /**
150 * NORMALIZE_GET_POINTER() -
151 *
152 * This macro will normalize the completion queue get pointer so its value can
153 * be used as an index into an array
154 */
155 #define NORMALIZE_GET_POINTER(x) \
156 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
157
158 /**
159 * NORMALIZE_PUT_POINTER() -
160 *
161 * This macro will normalize the completion queue put pointer so its value can
 162  * be used as an array index
163 */
164 #define NORMALIZE_PUT_POINTER(x) \
165 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
166
167
168 /**
169 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
170 *
171 * This macro will normalize the completion queue cycle pointer so it matches
172 * the completion queue cycle bit
173 */
174 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
175 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
176
177 /**
178 * NORMALIZE_EVENT_POINTER() -
179 *
180 * This macro will normalize the completion queue event entry so its value can
181 * be used as an index.
182 */
183 #define NORMALIZE_EVENT_POINTER(x) \
184 (\
185 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
186 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
187 )
188
189 /**
190 * INCREMENT_COMPLETION_QUEUE_GET() -
191 *
192 * This macro will increment the controllers completion queue index value and
193 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
194 */
195 #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
196 INCREMENT_QUEUE_GET(\
197 (index), \
198 (cycle), \
199 (controller)->completion_queue_entries, \
200 SMU_CQGR_CYCLE_BIT \
201 )
202
203 /**
204 * INCREMENT_EVENT_QUEUE_GET() -
205 *
206 * This macro will increment the controllers event queue index value and
207 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
208 */
209 #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
210 INCREMENT_QUEUE_GET(\
211 (index), \
212 (cycle), \
213 (controller)->completion_event_entries, \
214 SMU_CQGR_EVENT_CYCLE_BIT \
215 )
216
/**
 * sci_controller_get_memory_descriptor_list_handle() - return the handle to
 *    the controller's memory descriptor list (MDL).
 * @scic: the controller whose MDL to return.
 *
 * The MDL lives in the controller's parent (base) object; this accessor
 * simply exposes its address to callers.
 */
struct sci_base_memory_descriptor_list *
sci_controller_get_memory_descriptor_list_handle(struct scic_sds_controller *scic)
{
	return &scic->parent.mdl;
}
222
223 /*
224 * ****************************************************************************-
225 * * SCIC SDS Controller Initialization Methods
226 * ****************************************************************************- */
227
/**
 * scic_sds_controller_phy_startup_timeout_handler() - phy startup timer
 *    callback.
 * @controller: opaque timer cookie; actually the struct scic_sds_controller
 *    that owns the phy startup timer.
 *
 * This timer is used to start another phy after we have given up on the
 * previous phy to transition to the ready state.
 */
static void scic_sds_controller_phy_startup_timeout_handler(
	void *controller)
{
	enum sci_status status;
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)controller;

	/* The timer has fired, so it is no longer pending. */
	this_controller->phy_startup_timer_pending = false;

	status = SCI_FAILURE;

	/*
	 * NOTE(review): this loop retries until start_next_phy() reports
	 * success; if that call can fail persistently, this busy-waits
	 * forever — confirm the failure modes of
	 * scic_sds_controller_start_next_phy().
	 */
	while (status != SCI_SUCCESS) {
		status = scic_sds_controller_start_next_phy(this_controller);
	}
}
250
251 /**
252 *
253 *
254 * This method initializes the phy startup operations for controller start.
255 */
256 void scic_sds_controller_initialize_phy_startup(
257 struct scic_sds_controller *this_controller)
258 {
259 this_controller->phy_startup_timer = scic_cb_timer_create(
260 this_controller,
261 scic_sds_controller_phy_startup_timeout_handler,
262 this_controller
263 );
264
265 this_controller->next_phy_to_start = 0;
266 this_controller->phy_startup_timer_pending = false;
267 }
268
269 /**
270 *
271 *
272 * This method initializes the power control operations for the controller
273 * object.
274 */
275 void scic_sds_controller_initialize_power_control(
276 struct scic_sds_controller *this_controller)
277 {
278 this_controller->power_control.timer = scic_cb_timer_create(
279 this_controller,
280 scic_sds_controller_power_control_timer_handler,
281 this_controller
282 );
283
284 memset(
285 this_controller->power_control.requesters,
286 0,
287 sizeof(this_controller->power_control.requesters)
288 );
289
290 this_controller->power_control.phys_waiting = 0;
291 }
292
293 /* --------------------------------------------------------------------------- */
294
295 #define SCU_REMOTE_NODE_CONTEXT_ALIGNMENT (32)
296 #define SCU_TASK_CONTEXT_ALIGNMENT (256)
297 #define SCU_UNSOLICITED_FRAME_ADDRESS_ALIGNMENT (64)
298 #define SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT (1024)
299 #define SCU_UNSOLICITED_FRAME_HEADER_ALIGNMENT (64)
300
301 /* --------------------------------------------------------------------------- */
302
/**
 * scic_sds_controller_build_memory_descriptor_table() - build the memory
 *    descriptor table for this controller.
 * @this_controller: This parameter specifies the controller object for which
 *    to build the memory table.
 *
 * Fills in one memory descriptor entry (alignment, size, attributes) for
 * each of the four DMA regions the driver must supply: the completion
 * queue, the remote node context table, the task context table, and the
 * unsolicited frame buffers.
 */
static void scic_sds_controller_build_memory_descriptor_table(
	struct scic_sds_controller *this_controller)
{
	/* Completion queue: cacheable, contiguous, one u32 per entry. */
	sci_base_mde_construct(
		&this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
		SCU_COMPLETION_RAM_ALIGNMENT,
		(sizeof(u32) * this_controller->completion_queue_entries),
		(SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
	);

	/* Remote node context table: one context per supported remote node. */
	sci_base_mde_construct(
		&this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
		SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
		this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
		SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	/* Task context table: one context per supported task. */
	sci_base_mde_construct(
		&this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
		SCU_TASK_CONTEXT_ALIGNMENT,
		this_controller->task_context_entries * sizeof(struct scu_task_context),
		SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	/*
	 * The UF buffer address table size must be programmed to a power
	 * of 2.  Find the first power of 2 that is equal to or greater than
	 * the number of unsolicited frame buffers to be utilized.
	 */
	scic_sds_unsolicited_frame_control_set_address_table_count(
		&this_controller->uf_control
	);

	sci_base_mde_construct(
		&this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
		SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
		scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
		SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);
}
348
349 /**
350 * This method validates the driver supplied memory descriptor table.
351 * @this_controller:
352 *
353 * enum sci_status
354 */
355 enum sci_status scic_sds_controller_validate_memory_descriptor_table(
356 struct scic_sds_controller *this_controller)
357 {
358 bool mde_list_valid;
359
360 mde_list_valid = sci_base_mde_is_valid(
361 &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
362 SCU_COMPLETION_RAM_ALIGNMENT,
363 (sizeof(u32) * this_controller->completion_queue_entries),
364 (SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
365 );
366
367 if (mde_list_valid == false)
368 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
369
370 mde_list_valid = sci_base_mde_is_valid(
371 &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
372 SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
373 this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
374 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
375 );
376
377 if (mde_list_valid == false)
378 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
379
380 mde_list_valid = sci_base_mde_is_valid(
381 &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
382 SCU_TASK_CONTEXT_ALIGNMENT,
383 this_controller->task_context_entries * sizeof(struct scu_task_context),
384 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
385 );
386
387 if (mde_list_valid == false)
388 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
389
390 mde_list_valid = sci_base_mde_is_valid(
391 &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
392 SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
393 scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
394 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
395 );
396
397 if (mde_list_valid == false)
398 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
399
400 return SCI_SUCCESS;
401 }
402
/**
 * scic_sds_controller_ram_initialization() - program the controller with the
 *    physical memory addresses used to communicate with the driver.
 * @this_controller: the controller whose base-address registers to program.
 *
 * Caches the virtual address of each region in the controller object and
 * writes the corresponding physical address into the silicon's base-address
 * registers.
 */
void scic_sds_controller_ram_initialization(
	struct scic_sds_controller *this_controller)
{
	struct sci_physical_memory_descriptor *mde;

	/*
	 * The completion queue is actually placed in cacheable memory
	 * Therefore it no longer comes out of memory in the MDL. */
	mde = &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE];
	this_controller->completion_queue = (u32 *)mde->virtual_address;
	SMU_CQBAR_WRITE(this_controller, mde->physical_address);

	/*
	 * Program the location of the Remote Node Context table
	 * into the SCU. */
	mde = &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT];
	this_controller->remote_node_context_table = (union scu_remote_node_context *)
		mde->virtual_address;
	SMU_RNCBAR_WRITE(this_controller, mde->physical_address);

	/* Program the location of the Task Context table into the SCU. */
	mde = &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT];
	this_controller->task_context_table = (struct scu_task_context *)
		mde->virtual_address;
	SMU_HTTBAR_WRITE(this_controller, mde->physical_address);

	/* Carve the UF buffer region into headers and an address table. */
	mde = &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER];
	scic_sds_unsolicited_frame_control_construct(
		&this_controller->uf_control, mde, this_controller
	);

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table. */
	SCU_UFHBAR_WRITE(
		this_controller,
		this_controller->uf_control.headers.physical_address);
	SCU_PUFATHAR_WRITE(
		this_controller,
		this_controller->uf_control.address_table.physical_address);
}
450
451 /**
452 * This method initializes the task context data for the controller.
453 * @this_controller:
454 *
455 */
456 void scic_sds_controller_assign_task_entries(
457 struct scic_sds_controller *this_controller)
458 {
459 u32 task_assignment;
460
461 /*
462 * Assign all the TCs to function 0
463 * TODO: Do we actually need to read this register to write it back? */
464 task_assignment = SMU_TCA_READ(this_controller, 0);
465
466 task_assignment =
467 (
468 task_assignment
469 | (SMU_TCA_GEN_VAL(STARTING, 0))
470 | (SMU_TCA_GEN_VAL(ENDING, this_controller->task_context_entries - 1))
471 | (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE))
472 );
473
474 SMU_TCA_WRITE(this_controller, 0, task_assignment);
475 }
476
477 /**
478 * This method initializes the hardware completion queue.
479 *
480 *
481 */
482 void scic_sds_controller_initialize_completion_queue(
483 struct scic_sds_controller *this_controller)
484 {
485 u32 index;
486 u32 completion_queue_control_value;
487 u32 completion_queue_get_value;
488 u32 completion_queue_put_value;
489
490 this_controller->completion_queue_get = 0;
491
492 completion_queue_control_value = (
493 SMU_CQC_QUEUE_LIMIT_SET(this_controller->completion_queue_entries - 1)
494 | SMU_CQC_EVENT_LIMIT_SET(this_controller->completion_event_entries - 1)
495 );
496
497 SMU_CQC_WRITE(this_controller, completion_queue_control_value);
498
499 /* Set the completion queue get pointer and enable the queue */
500 completion_queue_get_value = (
501 (SMU_CQGR_GEN_VAL(POINTER, 0))
502 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
503 | (SMU_CQGR_GEN_BIT(ENABLE))
504 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
505 );
506
507 SMU_CQGR_WRITE(this_controller, completion_queue_get_value);
508
509 /* Set the completion queue put pointer */
510 completion_queue_put_value = (
511 (SMU_CQPR_GEN_VAL(POINTER, 0))
512 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
513 );
514
515 SMU_CQPR_WRITE(this_controller, completion_queue_put_value);
516
517 /* Initialize the cycle bit of the completion queue entries */
518 for (index = 0; index < this_controller->completion_queue_entries; index++) {
519 /*
520 * If get.cycle_bit != completion_queue.cycle_bit
521 * its not a valid completion queue entry
522 * so at system start all entries are invalid */
523 this_controller->completion_queue[index] = 0x80000000;
524 }
525 }
526
527 /**
528 * This method initializes the hardware unsolicited frame queue.
529 *
530 *
531 */
532 void scic_sds_controller_initialize_unsolicited_frame_queue(
533 struct scic_sds_controller *this_controller)
534 {
535 u32 frame_queue_control_value;
536 u32 frame_queue_get_value;
537 u32 frame_queue_put_value;
538
539 /* Write the queue size */
540 frame_queue_control_value =
541 SCU_UFQC_GEN_VAL(QUEUE_SIZE, this_controller->uf_control.address_table.count);
542
543 SCU_UFQC_WRITE(this_controller, frame_queue_control_value);
544
545 /* Setup the get pointer for the unsolicited frame queue */
546 frame_queue_get_value = (
547 SCU_UFQGP_GEN_VAL(POINTER, 0)
548 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
549 );
550
551 SCU_UFQGP_WRITE(this_controller, frame_queue_get_value);
552
553 /* Setup the put pointer for the unsolicited frame queue */
554 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
555
556 SCU_UFQPP_WRITE(this_controller, frame_queue_put_value);
557 }
558
559 /**
560 * This method enables the hardware port task scheduler.
561 *
562 *
563 */
564 void scic_sds_controller_enable_port_task_scheduler(
565 struct scic_sds_controller *this_controller)
566 {
567 u32 port_task_scheduler_value;
568
569 port_task_scheduler_value = SCU_PTSGCR_READ(this_controller);
570
571 port_task_scheduler_value |=
572 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
573
574 SCU_PTSGCR_WRITE(this_controller, port_task_scheduler_value);
575 }
576
577 /* --------------------------------------------------------------------------- */
578
579 /**
580 *
581 *
582 * This macro is used to delay between writes to the AFE registers during AFE
583 * initialization.
584 */
585 #define AFE_REGISTER_WRITE_DELAY 10
586
/* True exactly for A0 silicon (per the module-level isci_si_rev value). */
static bool is_a0(void)
{
	return isci_si_rev == ISCI_SI_REVA0;
}

/* True exactly for A2 silicon. */
static bool is_a2(void)
{
	return isci_si_rev == ISCI_SI_REVA2;
}

/* True for anything newer than A2, i.e. B0 or later. */
static bool is_b0(void)
{
	return isci_si_rev > ISCI_SI_REVA2;
}
601
/**
 * scic_sds_controller_afe_initialization() - initialize the analog front end
 *    (AFE) for every phy on this controller.
 * @scic: the controller whose AFE to program.
 *
 * The register values written here differ by silicon revision (A0/A2/B0);
 * we need to read the AFE setup from the OEM parameters.
 * NOTE(review): the exact write order and delays appear to be required by
 * the hardware — do not reorder.
 */
void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	scu_afe_register_write(scic, afe_dfx_master_control0, 0x0081000f);
	scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

	/* Configure bias currents to normal */
	if (is_a0())
		scu_afe_register_write(scic, afe_bias_control, 0x00005500);
	else
		scu_afe_register_write(scic, afe_bias_control, 0x00005A00);

	scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0())
		scu_afe_register_write(scic, afe_pll_control0, 0x80040A08);
	else
		scu_afe_register_write(scic, afe_pll_control0, 0x80040908);

	scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock (bit 12 of the common block status). */
	do {
		afe_status = scu_afe_register_read(
			scic, afe_common_block_status);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_b0()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		scu_afe_register_write(scic, afe_pmsn_master_control0, 0x7bcc96ad);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
	}

	/* Now configure the per-phy transceiver registers. */
	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			scu_afe_txreg_write(scic, phy_id, afe_tx_ssc_control, 0x00030000);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			scu_afe_txreg_write(scic, phy_id, afe_xcvr_control0, 0x00004512);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

			scu_afe_txreg_write(scic, phy_id, afe_xcvr_control1, 0x0050100F);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			scu_afe_txreg_write(scic, phy_id, afe_channel_control, 0x000003D4);
		else if (is_a2())
			scu_afe_txreg_write(scic, phy_id, afe_channel_control, 0x000003F0);
		else {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			scu_afe_txreg_write(scic, phy_id, afe_channel_control, 0x000003d7);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			scu_afe_txreg_write(scic, phy_id, afe_channel_control, 0x000003d4);
		}
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			scu_afe_txreg_write(scic, phy_id, afe_tx_control, 0x00040000);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		scu_afe_txreg_write(scic, phy_id, afe_xcvr_control0, 0x00004100);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			scu_afe_txreg_write(scic, phy_id, afe_rx_ssc_control0, 0x3F09983F);
		else if (is_a2())
			scu_afe_txreg_write(scic, phy_id, afe_rx_ssc_control0, 0x3F11103F);
		else {
			scu_afe_txreg_write(scic, phy_id, afe_rx_ssc_control0, 0x3F11103F);
			scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			scu_afe_txreg_write(scic, phy_id, afe_tx_control, 0x00040000);
		}
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		/* Program the TX amplitude controls. */
		scu_afe_txreg_write(scic, phy_id, afe_tx_amp_control0, 0x000E7C03);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		scu_afe_txreg_write(scic, phy_id, afe_tx_amp_control1, 0x000E7C03);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		scu_afe_txreg_write(scic, phy_id, afe_tx_amp_control2, 0x000E7C03);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);

		scu_afe_txreg_write(scic, phy_id, afe_tx_amp_control3, 0x000E7C03);
		scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	scu_afe_register_write(scic, afe_dfx_master_control0, 0x00010f00);
	scic_cb_stall_execution(AFE_REGISTER_WRITE_DELAY);
}
721
722 /*
723 * ****************************************************************************-
724 * * SCIC SDS Controller Internal Start/Stop Routines
725 * ****************************************************************************- */
726
727
728 /**
729 * This method will attempt to transition into the ready state for the
730 * controller and indicate that the controller start operation has completed
731 * if all criteria are met.
732 * @this_controller: This parameter indicates the controller object for which
733 * to transition to ready.
734 * @status: This parameter indicates the status value to be pass into the call
735 * to scic_cb_controller_start_complete().
736 *
737 * none.
738 */
739 static void scic_sds_controller_transition_to_ready(
740 struct scic_sds_controller *this_controller,
741 enum sci_status status)
742 {
743 if (this_controller->parent.state_machine.current_state_id
744 == SCI_BASE_CONTROLLER_STATE_STARTING) {
745 /*
746 * We move into the ready state, because some of the phys/ports
747 * may be up and operational. */
748 sci_base_state_machine_change_state(
749 scic_sds_controller_get_base_state_machine(this_controller),
750 SCI_BASE_CONTROLLER_STATE_READY
751 );
752
753 scic_cb_controller_start_complete(this_controller, status);
754 }
755 }
756
757 /**
758 * This method is the general timeout handler for the controller. It will take
759 * the correct timetout action based on the current controller state
760 */
761 void scic_sds_controller_timeout_handler(
762 struct scic_sds_controller *scic)
763 {
764 enum sci_base_controller_states current_state;
765
766 current_state = sci_base_state_machine_get_state(
767 scic_sds_controller_get_base_state_machine(scic));
768
769 if (current_state == SCI_BASE_CONTROLLER_STATE_STARTING) {
770 scic_sds_controller_transition_to_ready(
771 scic, SCI_FAILURE_TIMEOUT);
772 } else if (current_state == SCI_BASE_CONTROLLER_STATE_STOPPING) {
773 sci_base_state_machine_change_state(
774 scic_sds_controller_get_base_state_machine(scic),
775 SCI_BASE_CONTROLLER_STATE_FAILED);
776 scic_cb_controller_stop_complete(scic, SCI_FAILURE_TIMEOUT);
777 } else /* / @todo Now what do we want to do in this case? */
778 dev_err(scic_to_dev(scic),
779 "%s: Controller timer fired when controller was not "
780 "in a state being timed.\n",
781 __func__);
782 }
783
784 /**
785 * scic_sds_controller_get_port_configuration_mode
786 * @this_controller: This is the controller to use to determine if we are using
787 * manual or automatic port configuration.
788 *
789 * SCIC_PORT_CONFIGURATION_MODE
790 */
791 enum SCIC_PORT_CONFIGURATION_MODE scic_sds_controller_get_port_configuration_mode(
792 struct scic_sds_controller *this_controller)
793 {
794 u32 index;
795 enum SCIC_PORT_CONFIGURATION_MODE mode;
796
797 mode = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
798
799 for (index = 0; index < SCI_MAX_PORTS; index++) {
800 if (this_controller->oem_parameters.sds1.ports[index].phy_mask != 0) {
801 mode = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
802 break;
803 }
804 }
805
806 return mode;
807 }
808
809 enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
810 {
811 u32 index;
812 enum sci_status port_status;
813 enum sci_status status = SCI_SUCCESS;
814
815 for (index = 0; index < scic->logical_port_entries; index++) {
816 port_status = scic_port_stop(&scic->port_table[index]);
817
818 if ((port_status != SCI_SUCCESS) &&
819 (port_status != SCI_FAILURE_INVALID_STATE)) {
820 status = SCI_FAILURE;
821
822 dev_warn(scic_to_dev(scic),
823 "%s: Controller stop operation failed to "
824 "stop port %d because of status %d.\n",
825 __func__,
826 scic->port_table[index].logical_port_index,
827 port_status);
828 }
829 }
830
831 return status;
832 }
833
834 /**
835 *
836 *
837 *
838 */
839 static void scic_sds_controller_phy_timer_start(
840 struct scic_sds_controller *this_controller)
841 {
842 scic_cb_timer_start(
843 this_controller,
844 this_controller->phy_startup_timer,
845 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
846 );
847
848 this_controller->phy_startup_timer_pending = true;
849 }
850
851 /**
852 *
853 *
854 *
855 */
856 void scic_sds_controller_phy_timer_stop(
857 struct scic_sds_controller *this_controller)
858 {
859 scic_cb_timer_stop(
860 this_controller,
861 this_controller->phy_startup_timer
862 );
863
864 this_controller->phy_startup_timer_pending = false;
865 }
866
867 /**
868 * This method is called internally by the controller object to start the next
869 * phy on the controller. If all the phys have been starte, then this
870 * method will attempt to transition the controller to the READY state and
871 * inform the user (scic_cb_controller_start_complete()).
872 * @this_controller: This parameter specifies the controller object for which
873 * to start the next phy.
874 *
875 * enum sci_status
876 */
877 enum sci_status scic_sds_controller_start_next_phy(
878 struct scic_sds_controller *this_controller)
879 {
880 enum sci_status status;
881
882 status = SCI_SUCCESS;
883
884 if (this_controller->phy_startup_timer_pending == false) {
885 if (this_controller->next_phy_to_start == SCI_MAX_PHYS) {
886 bool is_controller_start_complete = true;
887 struct scic_sds_phy *the_phy;
888 u8 index;
889
890 for (index = 0; index < SCI_MAX_PHYS; index++) {
891 the_phy = &this_controller->phy_table[index];
892
893 if (scic_sds_phy_get_port(the_phy) != NULL) {
894 /**
895 * The controller start operation is complete if and only
896 * if:
897 * - all links have been given an opportunity to start
898 * - have no indication of a connected device
899 * - have an indication of a connected device and it has
900 * finished the link training process.
901 */
902 if (
903 (
904 (the_phy->is_in_link_training == false)
905 && (the_phy->parent.state_machine.current_state_id
906 == SCI_BASE_PHY_STATE_INITIAL)
907 )
908 || (
909 (the_phy->is_in_link_training == false)
910 && (the_phy->parent.state_machine.current_state_id
911 == SCI_BASE_PHY_STATE_STOPPED)
912 )
913 || (
914 (the_phy->is_in_link_training == true)
915 && (the_phy->parent.state_machine.current_state_id
916 == SCI_BASE_PHY_STATE_STARTING)
917 )
918 ) {
919 is_controller_start_complete = false;
920 break;
921 }
922 }
923 }
924
925 /*
926 * The controller has successfully finished the start process.
927 * Inform the SCI Core user and transition to the READY state. */
928 if (is_controller_start_complete == true) {
929 scic_sds_controller_transition_to_ready(
930 this_controller, SCI_SUCCESS
931 );
932 scic_sds_controller_phy_timer_stop(this_controller);
933 }
934 } else {
935 struct scic_sds_phy *the_phy;
936
937 the_phy = &this_controller->phy_table[this_controller->next_phy_to_start];
938
939 if (
940 scic_sds_controller_get_port_configuration_mode(this_controller)
941 == SCIC_PORT_MANUAL_CONFIGURATION_MODE
942 ) {
943 if (scic_sds_phy_get_port(the_phy) == NULL) {
944 this_controller->next_phy_to_start++;
945
946 /*
947 * Caution recursion ahead be forwarned
948 *
949 * The PHY was never added to a PORT in MPC mode so start the next phy in sequence
950 * This phy will never go link up and will not draw power the OEM parameters either
951 * configured the phy incorrectly for the PORT or it was never assigned to a PORT */
952 return scic_sds_controller_start_next_phy(this_controller);
953 }
954 }
955
956 status = scic_sds_phy_start(the_phy);
957
958 if (status == SCI_SUCCESS) {
959 scic_sds_controller_phy_timer_start(this_controller);
960 } else {
961 dev_warn(scic_to_dev(this_controller),
962 "%s: Controller stop operation failed "
963 "to stop phy %d because of status "
964 "%d.\n",
965 __func__,
966 this_controller->phy_table[this_controller->next_phy_to_start].phy_index,
967 status);
968 }
969
970 this_controller->next_phy_to_start++;
971 }
972 }
973
974 return status;
975 }
976
977 /**
978 *
979 * @this_controller:
980 *
981 * enum sci_status
982 */
983 enum sci_status scic_sds_controller_stop_phys(
984 struct scic_sds_controller *this_controller)
985 {
986 u32 index;
987 enum sci_status status;
988 enum sci_status phy_status;
989
990 status = SCI_SUCCESS;
991
992 for (index = 0; index < SCI_MAX_PHYS; index++) {
993 phy_status = scic_sds_phy_stop(&this_controller->phy_table[index]);
994
995 if (
996 (phy_status != SCI_SUCCESS)
997 && (phy_status != SCI_FAILURE_INVALID_STATE)
998 ) {
999 status = SCI_FAILURE;
1000
1001 dev_warn(scic_to_dev(this_controller),
1002 "%s: Controller stop operation failed to stop "
1003 "phy %d because of status %d.\n",
1004 __func__,
1005 this_controller->phy_table[index].phy_index, phy_status);
1006 }
1007 }
1008
1009 return status;
1010 }
1011
1012 /**
1013 *
1014 * @this_controller:
1015 *
1016 * enum sci_status
1017 */
1018 enum sci_status scic_sds_controller_stop_devices(
1019 struct scic_sds_controller *this_controller)
1020 {
1021 u32 index;
1022 enum sci_status status;
1023 enum sci_status device_status;
1024
1025 status = SCI_SUCCESS;
1026
1027 for (index = 0; index < this_controller->remote_node_entries; index++) {
1028 if (this_controller->device_table[index] != NULL) {
1029 /* / @todo What timeout value do we want to provide to this request? */
1030 device_status = scic_remote_device_stop(this_controller->device_table[index], 0);
1031
1032 if ((device_status != SCI_SUCCESS) &&
1033 (device_status != SCI_FAILURE_INVALID_STATE)) {
1034 dev_warn(scic_to_dev(this_controller),
1035 "%s: Controller stop operation failed "
1036 "to stop device 0x%p because of "
1037 "status %d.\n",
1038 __func__,
1039 this_controller->device_table[index], device_status);
1040 }
1041 }
1042 }
1043
1044 return status;
1045 }
1046
1047 /*
1048 * ****************************************************************************-
1049 * * SCIC SDS Controller Power Control (Staggered Spinup)
1050 * ****************************************************************************- */
1051
1052 /**
1053 *
1054 *
1055 * This method starts the power control timer for this controller object.
1056 */
1057 static void scic_sds_controller_power_control_timer_start(
1058 struct scic_sds_controller *this_controller)
1059 {
1060 scic_cb_timer_start(
1061 this_controller, this_controller->power_control.timer,
1062 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL
1063 );
1064
1065 this_controller->power_control.timer_started = true;
1066 }
1067
1068 /**
1069 *
1070 *
1071 *
1072 */
1073 static void scic_sds_controller_power_control_timer_handler(
1074 void *controller)
1075 {
1076 struct scic_sds_controller *this_controller;
1077
1078 this_controller = (struct scic_sds_controller *)controller;
1079
1080 if (this_controller->power_control.phys_waiting == 0) {
1081 this_controller->power_control.timer_started = false;
1082 } else {
1083 struct scic_sds_phy *the_phy = NULL;
1084 u8 i;
1085
1086 for (i = 0;
1087 (i < SCI_MAX_PHYS)
1088 && (this_controller->power_control.phys_waiting != 0);
1089 i++) {
1090 if (this_controller->power_control.requesters[i] != NULL) {
1091 the_phy = this_controller->power_control.requesters[i];
1092 this_controller->power_control.requesters[i] = NULL;
1093 this_controller->power_control.phys_waiting--;
1094 break;
1095 }
1096 }
1097
1098 /*
1099 * It doesn't matter if the power list is empty, we need to start the
1100 * timer in case another phy becomes ready. */
1101 scic_sds_controller_power_control_timer_start(this_controller);
1102
1103 scic_sds_phy_consume_power_handler(the_phy);
1104 }
1105 }
1106
1107 /**
1108 * This method inserts the phy in the stagger spinup control queue.
1109 * @this_controller:
1110 *
1111 *
1112 */
1113 void scic_sds_controller_power_control_queue_insert(
1114 struct scic_sds_controller *this_controller,
1115 struct scic_sds_phy *the_phy)
1116 {
1117 BUG_ON(the_phy == NULL);
1118
1119 if (
1120 (this_controller->power_control.timer_started)
1121 && (this_controller->power_control.requesters[the_phy->phy_index] == NULL)
1122 ) {
1123 this_controller->power_control.requesters[the_phy->phy_index] = the_phy;
1124 this_controller->power_control.phys_waiting++;
1125 } else {
1126 scic_sds_controller_power_control_timer_start(this_controller);
1127 scic_sds_phy_consume_power_handler(the_phy);
1128 }
1129 }
1130
1131 /**
1132 * This method removes the phy from the stagger spinup control queue.
1133 * @this_controller:
1134 *
1135 *
1136 */
1137 void scic_sds_controller_power_control_queue_remove(
1138 struct scic_sds_controller *this_controller,
1139 struct scic_sds_phy *the_phy)
1140 {
1141 BUG_ON(the_phy == NULL);
1142
1143 if (this_controller->power_control.requesters[the_phy->phy_index] != NULL) {
1144 this_controller->power_control.phys_waiting--;
1145 }
1146
1147 this_controller->power_control.requesters[the_phy->phy_index] = NULL;
1148 }
1149
1150 /*
1151 * ****************************************************************************-
1152 * * SCIC SDS Controller Completion Routines
1153 * ****************************************************************************- */
1154
1155 /**
1156 * This method returns a true value if the completion queue has entries that
1157 * can be processed
1158 * @this_controller:
1159 *
1160 * bool true if the completion queue has entries to process false if the
1161 * completion queue has no entries to process
1162 */
1163 static bool scic_sds_controller_completion_queue_has_entries(
1164 struct scic_sds_controller *this_controller)
1165 {
1166 u32 get_value = this_controller->completion_queue_get;
1167 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
1168
1169 if (
1170 NORMALIZE_GET_POINTER_CYCLE_BIT(get_value)
1171 == COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
1172 ) {
1173 return true;
1174 }
1175
1176 return false;
1177 }
1178
1179 /* --------------------------------------------------------------------------- */
1180
1181 /**
1182 * This method processes a task completion notification. This is called from
1183 * within the controller completion handler.
1184 * @this_controller:
1185 * @completion_entry:
1186 *
1187 */
1188 static void scic_sds_controller_task_completion(
1189 struct scic_sds_controller *this_controller,
1190 u32 completion_entry)
1191 {
1192 u32 index;
1193 struct scic_sds_request *io_request;
1194
1195 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1196 io_request = this_controller->io_request_table[index];
1197
1198 /* Make sure that we really want to process this IO request */
1199 if (
1200 (io_request != NULL)
1201 && (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
1202 && (
1203 scic_sds_io_tag_get_sequence(io_request->io_tag)
1204 == this_controller->io_request_sequence[index]
1205 )
1206 ) {
1207 /* Yep this is a valid io request pass it along to the io request handler */
1208 scic_sds_io_request_tc_completion(io_request, completion_entry);
1209 }
1210 }
1211
1212 /**
1213 * This method processes an SDMA completion event. This is called from within
1214 * the controller completion handler.
1215 * @this_controller:
1216 * @completion_entry:
1217 *
1218 */
1219 static void scic_sds_controller_sdma_completion(
1220 struct scic_sds_controller *this_controller,
1221 u32 completion_entry)
1222 {
1223 u32 index;
1224 struct scic_sds_request *io_request;
1225 struct scic_sds_remote_device *device;
1226
1227 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1228
1229 switch (scu_get_command_request_type(completion_entry)) {
1230 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
1231 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
1232 io_request = this_controller->io_request_table[index];
1233 dev_warn(scic_to_dev(this_controller),
1234 "%s: SCIC SDS Completion type SDMA %x for io request "
1235 "%p\n",
1236 __func__,
1237 completion_entry,
1238 io_request);
1239 /* @todo For a post TC operation we need to fail the IO
1240 * request
1241 */
1242 break;
1243
1244 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
1245 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
1246 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
1247 device = this_controller->device_table[index];
1248 dev_warn(scic_to_dev(this_controller),
1249 "%s: SCIC SDS Completion type SDMA %x for remote "
1250 "device %p\n",
1251 __func__,
1252 completion_entry,
1253 device);
1254 /* @todo For a port RNC operation we need to fail the
1255 * device
1256 */
1257 break;
1258
1259 default:
1260 dev_warn(scic_to_dev(this_controller),
1261 "%s: SCIC SDS Completion unknown SDMA completion "
1262 "type %x\n",
1263 __func__,
1264 completion_entry);
1265 break;
1266
1267 }
1268 }
1269
/**
 * scic_sds_controller_unsolicited_frame() - route an unsolicited frame
 * @this_controller: controller that received the frame
 * @completion_entry: completion queue entry describing the frame
 *
 * This method processes an unsolicited frame message. This is called from
 * within the controller completion handler.  Errored frames are released
 * immediately; address frames go to the receiving phy; other frames go to
 * the remote device identified by the completion index, falling back to
 * the phy for not-yet-created direct-attached SATA devices.
 */
static void scic_sds_controller_unsolicited_frame(
	struct scic_sds_controller *this_controller,
	u32 completion_entry)
{
	u32 index;
	u32 frame_index;

	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	/* Default to failure; a handler that accepts the frame overwrites it. */
	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	/* Mark the frame buffer as claimed before inspecting its header. */
	frame_header
		= this_controller->uf_control.buffers.array[frame_index].header;
	this_controller->uf_control.buffers.array[frame_index].state
		= UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 * / this cause a problem? We expect the phy initialization will
		 * / fail if there is an error in the frame. */
		scic_sds_controller_release_frame(this_controller, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		/* Address frames are handled by the phy that received them. */
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &this_controller->phy_table[index];
		/* NOTE(review): &phy_table[index] can never be NULL; this
		 * check looks redundant — confirm before removing. */
		if (phy != NULL) {
			result = scic_sds_phy_frame_handler(phy, frame_index);
		}
	} else {

		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created. In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &this_controller->phy_table[index];
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			/* Look up the owning remote device by its node index. */
			if (index < this_controller->remote_node_entries)
				device = this_controller->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(this_controller, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * / @todo Is there any reason to report some additional error message
		 * / when we get this failure notification? */
	}
}
1344
/**
 * scic_sds_controller_event_completion() - dispatch an event queue entry
 * @this_controller: controller that received the event
 * @completion_entry: raw event completion entry from the hardware
 *
 * This method processes an event completion entry. This is called from within
 * the controller completion handler.  The event type selects whether the
 * entry is routed to an io request, a remote device, a phy, or is only
 * logged (SMU/fatal errors and unknown codes).
 */
static void scic_sds_controller_event_completion(
	struct scic_sds_controller *this_controller,
	u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* / @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(this_controller),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			this_controller,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * / @todo This is a hardware failure and its likely that we want to
		 * / reset the controller. */
		dev_err(scic_to_dev(this_controller),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			this_controller,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		/* NOTE(review): unlike the PTX path below, io_request is not
		 * NULL-checked here — confirm the table entry is guaranteed
		 * valid for transport errors. */
		io_request = this_controller->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		/* The event specifier narrows the target to a request or device. */
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = this_controller->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(this_controller),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesnt exist.\n",
					 __func__,
					 this_controller,
					 completion_entry);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = this_controller->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(this_controller),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesnt exist.\n",
					 __func__,
					 this_controller,
					 completion_entry);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification. This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		/* Phy-scoped events: index by protocol engine, not completion index. */
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &this_controller->phy_table[index];
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		/* Remote-node events: forward to the device owning the node index. */
		if (index < this_controller->remote_node_entries) {
			device = this_controller->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(this_controller),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesnt "
				"exist.\n",
				__func__,
				this_controller,
				completion_entry,
				index);

		break;

	default:
		dev_warn(scic_to_dev(this_controller),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}
1470
/**
 * scic_sds_controller_process_completions() - drain the completion queue
 * @this_controller: controller whose completion queue is processed
 *
 * This method is a private routine for processing the completion queue
 * entries.  It walks the queue while each slot's cycle bit matches the get
 * pointer's cycle bit, dispatches every entry by completion type, and
 * finally writes the updated get pointer (normal and event components)
 * back to the hardware if anything was consumed.
 */
static void scic_sds_controller_process_completions(
	struct scic_sds_controller *this_controller)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(this_controller),
		"%s: completion queue begining get:0x%08x\n",
		__func__,
		this_controller->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(this_controller->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & this_controller->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(this_controller->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & this_controller->completion_queue_get;

	/* A slot is valid while its stored cycle bit agrees with our get
	 * cycle; the INCREMENT_* macros advance the index and flip the
	 * cycle bit on wrap-around. */
	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
		) {
		completion_count++;

		completion_entry = this_controller->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(this_controller, get_index, get_cycle);

		dev_dbg(scic_to_dev(this_controller),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(this_controller, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(this_controller, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(this_controller, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			/* Events also consume an event-queue slot. */
			INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
			scic_sds_controller_event_completion(this_controller, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/*
			 * Presently we do the same thing with a notify event that we do with the
			 * other event codes. */
			INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
			scic_sds_controller_event_completion(this_controller, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(this_controller),
				"%s: SCIC Controller received unknown "
				"completion type %x\n",
				__func__,
				completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		this_controller->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE)
			| SMU_CQGR_GEN_BIT(EVENT_ENABLE)
			| event_cycle | SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index)
			| get_cycle | SMU_CQGR_GEN_VAL(POINTER, get_index);

		SMU_CQGR_WRITE(this_controller,
			       this_controller->completion_queue_get);
	}

	dev_dbg(scic_to_dev(this_controller),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		this_controller->completion_queue_get);

}
1566
1567 bool scic_sds_controller_isr(struct scic_sds_controller *scic)
1568 {
1569 if (scic_sds_controller_completion_queue_has_entries(scic)) {
1570 return true;
1571 } else {
1572 /*
1573 * we have a spurious interrupt it could be that we have already
1574 * emptied the completion queue from a previous interrupt */
1575 SMU_ISR_WRITE(scic, SMU_ISR_COMPLETION);
1576
1577 /*
1578 * There is a race in the hardware that could cause us not to be notified
1579 * of an interrupt completion if we do not take this step. We will mask
1580 * then unmask the interrupts so if there is another interrupt pending
1581 * the clearing of the interrupt source we get the next interrupt message. */
1582 SMU_IMR_WRITE(scic, 0xFF000000);
1583 SMU_IMR_WRITE(scic, 0x00000000);
1584 }
1585
1586 return false;
1587 }
1588
/**
 * scic_sds_controller_completion_handler() - drain and re-enable completions
 * @scic: controller whose completion queue should be processed
 *
 * Empties the completion queue, acks the completion interrupt, and toggles
 * the interrupt mask so any interrupt pending across the ack is redelivered.
 */
void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	SMU_ISR_WRITE(scic, SMU_ISR_COMPLETION);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	SMU_IMR_WRITE(scic, 0xFF000000);
	SMU_IMR_WRITE(scic, 0x00000000);
}
1601
1602 bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
1603 {
1604 u32 interrupt_status;
1605
1606 interrupt_status = SMU_ISR_READ(scic);
1607
1608 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
1609
1610 if (interrupt_status != 0) {
1611 /*
1612 * There is an error interrupt pending so let it through and handle
1613 * in the callback */
1614 return true;
1615 }
1616
1617 /*
1618 * There is a race in the hardware that could cause us not to be notified
1619 * of an interrupt completion if we do not take this step. We will mask
1620 * then unmask the error interrupts so if there was another interrupt
1621 * pending we will be notified.
1622 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
1623 SMU_IMR_WRITE(scic, 0x000000FF);
1624 SMU_IMR_WRITE(scic, 0x00000000);
1625
1626 return false;
1627 }
1628
1629 void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
1630 {
1631 u32 interrupt_status;
1632
1633 interrupt_status = SMU_ISR_READ(scic);
1634
1635 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
1636 scic_sds_controller_completion_queue_has_entries(scic)) {
1637
1638 scic_sds_controller_process_completions(scic);
1639 SMU_ISR_WRITE(scic, SMU_ISR_QUEUE_SUSPEND);
1640
1641 } else {
1642 dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
1643 interrupt_status);
1644
1645 sci_base_state_machine_change_state(
1646 scic_sds_controller_get_base_state_machine(scic),
1647 SCI_BASE_CONTROLLER_STATE_FAILED);
1648
1649 return;
1650 }
1651
1652 /*
1653 * If we dont process any completions I am not sure that we want to do this.
1654 * We are in the middle of a hardware fault and should probably be reset. */
1655 SMU_IMR_WRITE(scic, 0x00000000);
1656 }
1657
1658
/**
 * scic_sds_controller_get_object_size() - controller object size
 *
 * Returns the number of bytes required to hold one
 * struct scic_sds_controller.
 */
u32 scic_sds_controller_get_object_size(void)
{
	return sizeof(struct scic_sds_controller);
}
1663
1664
1665 void scic_sds_controller_link_up(
1666 struct scic_sds_controller *scic,
1667 struct scic_sds_port *sci_port,
1668 struct scic_sds_phy *sci_phy)
1669 {
1670 scic_sds_controller_phy_handler_t link_up;
1671 u32 state;
1672
1673 state = scic->parent.state_machine.current_state_id;
1674 link_up = scic_sds_controller_state_handler_table[state].link_up;
1675
1676 if (link_up)
1677 link_up(scic, sci_port, sci_phy);
1678 else
1679 dev_warn(scic_to_dev(scic),
1680 "%s: SCIC Controller linkup event from phy %d in "
1681 "unexpected state %d\n",
1682 __func__,
1683 sci_phy->phy_index,
1684 sci_base_state_machine_get_state(
1685 scic_sds_controller_get_base_state_machine(
1686 scic)));
1687 }
1688
1689
1690 void scic_sds_controller_link_down(
1691 struct scic_sds_controller *scic,
1692 struct scic_sds_port *sci_port,
1693 struct scic_sds_phy *sci_phy)
1694 {
1695 u32 state;
1696 scic_sds_controller_phy_handler_t link_down;
1697
1698 state = scic->parent.state_machine.current_state_id;
1699 link_down = scic_sds_controller_state_handler_table[state].link_down;
1700
1701 if (link_down)
1702 link_down(scic, sci_port, sci_phy);
1703 else
1704 dev_warn(scic_to_dev(scic),
1705 "%s: SCIC Controller linkdown event from phy %d in "
1706 "unexpected state %d\n",
1707 __func__,
1708 sci_phy->phy_index,
1709 sci_base_state_machine_get_state(
1710 scic_sds_controller_get_base_state_machine(
1711 scic)));
1712 }
1713
1714 /**
1715 * This method will write to the SCU PCP register the request value. The method
1716 * is used to suspend/resume ports, devices, and phys.
1717 * @this_controller:
1718 *
1719 *
1720 */
1721 void scic_sds_controller_post_request(
1722 struct scic_sds_controller *this_controller,
1723 u32 request)
1724 {
1725 dev_dbg(scic_to_dev(this_controller),
1726 "%s: SCIC Controller 0x%p post request 0x%08x\n",
1727 __func__,
1728 this_controller,
1729 request);
1730
1731 SMU_PCP_WRITE(this_controller, request);
1732 }
1733
1734 /**
1735 * This method will copy the soft copy of the task context into the physical
1736 * memory accessible by the controller.
1737 * @this_controller: This parameter specifies the controller for which to copy
1738 * the task context.
1739 * @this_request: This parameter specifies the request for which the task
1740 * context is being copied.
1741 *
1742 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
1743 * the physical memory version of the task context. Thus, all subsequent
1744 * updates to the task context are performed in the TC table (i.e. DMAable
1745 * memory). none
1746 */
1747 void scic_sds_controller_copy_task_context(
1748 struct scic_sds_controller *this_controller,
1749 struct scic_sds_request *this_request)
1750 {
1751 struct scu_task_context *task_context_buffer;
1752
1753 task_context_buffer = scic_sds_controller_get_task_context_buffer(
1754 this_controller, this_request->io_tag
1755 );
1756
1757 memcpy(
1758 task_context_buffer,
1759 this_request->task_context_buffer,
1760 SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac)
1761 );
1762
1763 /*
1764 * Now that the soft copy of the TC has been copied into the TC
1765 * table accessible by the silicon. Thus, any further changes to
1766 * the TC (e.g. TC termination) occur in the appropriate location. */
1767 this_request->task_context_buffer = task_context_buffer;
1768 }
1769
1770 /**
1771 * This method returns the task context buffer for the given io tag.
1772 * @this_controller:
1773 * @io_tag:
1774 *
1775 * struct scu_task_context*
1776 */
1777 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
1778 struct scic_sds_controller *this_controller,
1779 u16 io_tag
1780 ) {
1781 u16 task_index = scic_sds_io_tag_get_index(io_tag);
1782
1783 if (task_index < this_controller->task_context_entries) {
1784 return &this_controller->task_context_table[task_index];
1785 }
1786
1787 return NULL;
1788 }
1789
/**
 * This method returns the sequence value from the io tag value
 * @this_controller:
 * @io_tag:
 *
 * u16
 */
1797
1798 /**
1799 * This method returns the IO request associated with the tag value
1800 * @this_controller:
1801 * @io_tag:
1802 *
1803 * SCIC_SDS_IO_REQUEST_T* NULL if there is no valid IO request at the tag value
1804 */
1805 struct scic_sds_request *scic_sds_controller_get_io_request_from_tag(
1806 struct scic_sds_controller *this_controller,
1807 u16 io_tag
1808 ) {
1809 u16 task_index;
1810 u16 task_sequence;
1811
1812 task_index = scic_sds_io_tag_get_index(io_tag);
1813
1814 if (task_index < this_controller->task_context_entries) {
1815 if (this_controller->io_request_table[task_index] != NULL) {
1816 task_sequence = scic_sds_io_tag_get_sequence(io_tag);
1817
1818 if (task_sequence == this_controller->io_request_sequence[task_index]) {
1819 return this_controller->io_request_table[task_index];
1820 }
1821 }
1822 }
1823
1824 return NULL;
1825 }
1826
1827 /**
1828 * This method allocates remote node index and the reserves the remote node
1829 * context space for use. This method can fail if there are no more remote
1830 * node index available.
1831 * @this_controller: This is the controller object which contains the set of
1832 * free remote node ids
1833 * @the_devce: This is the device object which is requesting the a remote node
1834 * id
1835 * @node_id: This is the remote node id that is assinged to the device if one
1836 * is available
1837 *
1838 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
1839 * node index available.
1840 */
1841 enum sci_status scic_sds_controller_allocate_remote_node_context(
1842 struct scic_sds_controller *this_controller,
1843 struct scic_sds_remote_device *the_device,
1844 u16 *node_id)
1845 {
1846 u16 node_index;
1847 u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
1848
1849 node_index = scic_sds_remote_node_table_allocate_remote_node(
1850 &this_controller->available_remote_nodes, remote_node_count
1851 );
1852
1853 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1854 this_controller->device_table[node_index] = the_device;
1855
1856 *node_id = node_index;
1857
1858 return SCI_SUCCESS;
1859 }
1860
1861 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1862 }
1863
1864 /**
1865 * This method frees the remote node index back to the available pool. Once
1866 * this is done the remote node context buffer is no longer valid and can
1867 * not be used.
1868 * @this_controller:
1869 * @the_device:
1870 * @node_id:
1871 *
1872 */
1873 void scic_sds_controller_free_remote_node_context(
1874 struct scic_sds_controller *this_controller,
1875 struct scic_sds_remote_device *the_device,
1876 u16 node_id)
1877 {
1878 u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
1879
1880 if (this_controller->device_table[node_id] == the_device) {
1881 this_controller->device_table[node_id] = NULL;
1882
1883 scic_sds_remote_node_table_release_remote_node_index(
1884 &this_controller->available_remote_nodes, remote_node_count, node_id
1885 );
1886 }
1887 }
1888
1889 /**
1890 * This method returns the union scu_remote_node_context for the specified remote
1891 * node id.
1892 * @this_controller:
1893 * @node_id:
1894 *
1895 * union scu_remote_node_context*
1896 */
1897 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
1898 struct scic_sds_controller *this_controller,
1899 u16 node_id
1900 ) {
1901 if (
1902 (node_id < this_controller->remote_node_entries)
1903 && (this_controller->device_table[node_id] != NULL)
1904 ) {
1905 return &this_controller->remote_node_context_table[node_id];
1906 }
1907
1908 return NULL;
1909 }
1910
1911 /**
1912 *
1913 * @resposne_buffer: This is the buffer into which the D2H register FIS will be
1914 * constructed.
1915 * @frame_header: This is the frame header returned by the hardware.
1916 * @frame_buffer: This is the frame buffer returned by the hardware.
1917 *
1918 * This method will combind the frame header and frame buffer to create a SATA
1919 * D2H register FIS none
1920 */
1921 void scic_sds_controller_copy_sata_response(
1922 void *response_buffer,
1923 void *frame_header,
1924 void *frame_buffer)
1925 {
1926 memcpy(
1927 response_buffer,
1928 frame_header,
1929 sizeof(u32)
1930 );
1931
1932 memcpy(
1933 (char *)((char *)response_buffer + sizeof(u32)),
1934 frame_buffer,
1935 sizeof(struct sata_fis_reg_d2h) - sizeof(u32)
1936 );
1937 }
1938
1939 /**
1940 * This method releases the frame once this is done the frame is available for
1941 * re-use by the hardware. The data contained in the frame header and frame
1942 * buffer is no longer valid. The UF queue get pointer is only updated if UF
1943 * control indicates this is appropriate.
1944 * @this_controller:
1945 * @frame_index:
1946 *
1947 */
1948 void scic_sds_controller_release_frame(
1949 struct scic_sds_controller *this_controller,
1950 u32 frame_index)
1951 {
1952 if (scic_sds_unsolicited_frame_control_release_frame(
1953 &this_controller->uf_control, frame_index) == true)
1954 SCU_UFQGP_WRITE(this_controller, this_controller->uf_control.get);
1955 }
1956
1957 /**
1958 * This method sets user parameters and OEM parameters to default values.
1959 * Users can override these values utilizing the scic_user_parameters_set()
1960 * and scic_oem_parameters_set() methods.
1961 * @controller: This parameter specifies the controller for which to set the
1962 * configuration parameters to their default values.
1963 *
1964 */
1965 static void scic_sds_controller_set_default_config_parameters(
1966 struct scic_sds_controller *this_controller)
1967 {
1968 u16 index;
1969
1970 /* Default to no SSC operation. */
1971 this_controller->oem_parameters.sds1.controller.do_enable_ssc = false;
1972
1973 /* Initialize all of the port parameter information to narrow ports. */
1974 for (index = 0; index < SCI_MAX_PORTS; index++) {
1975 this_controller->oem_parameters.sds1.ports[index].phy_mask = 0;
1976 }
1977
1978 /* Initialize all of the phy parameter information. */
1979 for (index = 0; index < SCI_MAX_PHYS; index++) {
1980 /*
1981 * Default to 3G (i.e. Gen 2) for now. User can override if
1982 * they choose. */
1983 this_controller->user_parameters.sds1.phys[index].max_speed_generation = 2;
1984
1985 /*
1986 * Previous Vitesse based expanders had a arbitration issue that
1987 * is worked around by having the upper 32-bits of SAS address
1988 * with a value greater then the Vitesse company identifier.
1989 * Hence, usage of 0x5FCFFFFF. */
1990 this_controller->oem_parameters.sds1.phys[index].sas_address.low
1991 = 0x00000001;
1992 this_controller->oem_parameters.sds1.phys[index].sas_address.high
1993 = 0x5FCFFFFF;
1994 }
1995
1996 this_controller->user_parameters.sds1.stp_inactivity_timeout = 5;
1997 this_controller->user_parameters.sds1.ssp_inactivity_timeout = 5;
1998 this_controller->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1999 this_controller->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
2000 this_controller->user_parameters.sds1.no_outbound_task_timeout = 20;
2001 }
2002
2003
2004 enum sci_status scic_controller_construct(struct scic_sds_controller *controller,
2005 void __iomem *scu_base,
2006 void __iomem *smu_base)
2007 {
2008 u8 index;
2009
2010 sci_base_controller_construct(
2011 &controller->parent,
2012 scic_sds_controller_state_table,
2013 controller->memory_descriptors,
2014 ARRAY_SIZE(controller->memory_descriptors),
2015 NULL
2016 );
2017
2018 controller->scu_registers = scu_base;
2019 controller->smu_registers = smu_base;
2020
2021 scic_sds_port_configuration_agent_construct(&controller->port_agent);
2022
2023 /* Construct the ports for this controller */
2024 for (index = 0; index < SCI_MAX_PORTS; index++)
2025 scic_sds_port_construct(&controller->port_table[index],
2026 index, controller);
2027 scic_sds_port_construct(&controller->port_table[index],
2028 SCIC_SDS_DUMMY_PORT, controller);
2029
2030 /* Construct the phys for this controller */
2031 for (index = 0; index < SCI_MAX_PHYS; index++) {
2032 /* Add all the PHYs to the dummy port */
2033 scic_sds_phy_construct(
2034 &controller->phy_table[index],
2035 &controller->port_table[SCI_MAX_PORTS],
2036 index
2037 );
2038 }
2039
2040 controller->invalid_phy_mask = 0;
2041
2042 /* Set the default maximum values */
2043 controller->completion_event_entries = SCU_EVENT_COUNT;
2044 controller->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
2045 controller->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
2046 controller->logical_port_entries = SCI_MAX_PORTS;
2047 controller->task_context_entries = SCU_IO_REQUEST_COUNT;
2048 controller->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
2049 controller->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
2050
2051 /* Initialize the User and OEM parameters to default values. */
2052 scic_sds_controller_set_default_config_parameters(controller);
2053
2054 return SCI_SUCCESS;
2055 }
2056
2057 /* --------------------------------------------------------------------------- */
2058
2059 enum sci_status scic_controller_initialize(
2060 struct scic_sds_controller *scic)
2061 {
2062 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2063 sci_base_controller_handler_t initialize;
2064 u32 state;
2065
2066 state = scic->parent.state_machine.current_state_id;
2067 initialize = scic_sds_controller_state_handler_table[state].base.initialize;
2068
2069 if (initialize)
2070 status = initialize(&scic->parent);
2071 else
2072 dev_warn(scic_to_dev(scic),
2073 "%s: SCIC Controller initialize operation requested "
2074 "in invalid state %d\n",
2075 __func__,
2076 sci_base_state_machine_get_state(
2077 scic_sds_controller_get_base_state_machine(
2078 scic)));
2079
2080 return status;
2081 }
2082
2083 /* --------------------------------------------------------------------------- */
2084
2085 u32 scic_controller_get_suggested_start_timeout(
2086 struct scic_sds_controller *sc)
2087 {
2088 /* Validate the user supplied parameters. */
2089 if (sc == NULL)
2090 return 0;
2091
2092 /*
2093 * The suggested minimum timeout value for a controller start operation:
2094 *
2095 * Signature FIS Timeout
2096 * + Phy Start Timeout
2097 * + Number of Phy Spin Up Intervals
2098 * ---------------------------------
2099 * Number of milliseconds for the controller start operation.
2100 *
2101 * NOTE: The number of phy spin up intervals will be equivalent
2102 * to the number of phys divided by the number phys allowed
2103 * per interval - 1 (once OEM parameters are supported).
2104 * Currently we assume only 1 phy per interval. */
2105
2106 return (SCIC_SDS_SIGNATURE_FIS_TIMEOUT
2107 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
2108 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL));
2109 }
2110
2111 /* --------------------------------------------------------------------------- */
2112
2113 enum sci_status scic_controller_start(
2114 struct scic_sds_controller *scic,
2115 u32 timeout)
2116 {
2117 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2118 sci_base_controller_timed_handler_t start;
2119 u32 state;
2120
2121 state = scic->parent.state_machine.current_state_id;
2122 start = scic_sds_controller_state_handler_table[state].base.start;
2123
2124 if (start)
2125 status = start(&scic->parent, timeout);
2126 else
2127 dev_warn(scic_to_dev(scic),
2128 "%s: SCIC Controller start operation requested in "
2129 "invalid state %d\n",
2130 __func__,
2131 sci_base_state_machine_get_state(
2132 scic_sds_controller_get_base_state_machine(
2133 scic)));
2134
2135 return status;
2136 }
2137
2138 /* --------------------------------------------------------------------------- */
2139
2140 enum sci_status scic_controller_stop(
2141 struct scic_sds_controller *scic,
2142 u32 timeout)
2143 {
2144 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2145 sci_base_controller_timed_handler_t stop;
2146 u32 state;
2147
2148 state = scic->parent.state_machine.current_state_id;
2149 stop = scic_sds_controller_state_handler_table[state].base.stop;
2150
2151 if (stop)
2152 status = stop(&scic->parent, timeout);
2153 else
2154 dev_warn(scic_to_dev(scic),
2155 "%s: SCIC Controller stop operation requested in "
2156 "invalid state %d\n",
2157 __func__,
2158 sci_base_state_machine_get_state(
2159 scic_sds_controller_get_base_state_machine(
2160 scic)));
2161
2162 return status;
2163 }
2164
2165 /* --------------------------------------------------------------------------- */
2166
2167 enum sci_status scic_controller_reset(
2168 struct scic_sds_controller *scic)
2169 {
2170 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2171 sci_base_controller_handler_t reset;
2172 u32 state;
2173
2174 state = scic->parent.state_machine.current_state_id;
2175 reset = scic_sds_controller_state_handler_table[state].base.reset;
2176
2177 if (reset)
2178 status = reset(&scic->parent);
2179 else
2180 dev_warn(scic_to_dev(scic),
2181 "%s: SCIC Controller reset operation requested in "
2182 "invalid state %d\n",
2183 __func__,
2184 sci_base_state_machine_get_state(
2185 scic_sds_controller_get_base_state_machine(
2186 scic)));
2187
2188 return status;
2189 }
2190
2191 enum sci_io_status scic_controller_start_io(
2192 struct scic_sds_controller *scic,
2193 struct scic_sds_remote_device *remote_device,
2194 struct scic_sds_request *io_request,
2195 u16 io_tag)
2196 {
2197 u32 state;
2198 sci_base_controller_start_request_handler_t start_io;
2199
2200 state = scic->parent.state_machine.current_state_id;
2201 start_io = scic_sds_controller_state_handler_table[state].base.start_io;
2202
2203 return start_io(&scic->parent,
2204 (struct sci_base_remote_device *) remote_device,
2205 (struct sci_base_request *)io_request, io_tag);
2206 }
2207
2208 /* --------------------------------------------------------------------------- */
2209
2210 enum sci_status scic_controller_terminate_request(
2211 struct scic_sds_controller *scic,
2212 struct scic_sds_remote_device *remote_device,
2213 struct scic_sds_request *request)
2214 {
2215 sci_base_controller_request_handler_t terminate_request;
2216 u32 state;
2217
2218 state = scic->parent.state_machine.current_state_id;
2219 terminate_request = scic_sds_controller_state_handler_table[state].terminate_request;
2220
2221 return terminate_request(&scic->parent,
2222 (struct sci_base_remote_device *)remote_device,
2223 (struct sci_base_request *)request);
2224 }
2225
2226 /* --------------------------------------------------------------------------- */
2227
2228 enum sci_status scic_controller_complete_io(
2229 struct scic_sds_controller *scic,
2230 struct scic_sds_remote_device *remote_device,
2231 struct scic_sds_request *io_request)
2232 {
2233 u32 state;
2234 sci_base_controller_request_handler_t complete_io;
2235
2236 state = scic->parent.state_machine.current_state_id;
2237 complete_io = scic_sds_controller_state_handler_table[state].base.complete_io;
2238
2239 return complete_io(&scic->parent,
2240 (struct sci_base_remote_device *)remote_device,
2241 (struct sci_base_request *)io_request);
2242 }
2243
2244 /* --------------------------------------------------------------------------- */
2245
2246
2247 enum sci_task_status scic_controller_start_task(
2248 struct scic_sds_controller *scic,
2249 struct scic_sds_remote_device *remote_device,
2250 struct scic_sds_request *task_request,
2251 u16 task_tag)
2252 {
2253 u32 state;
2254 sci_base_controller_start_request_handler_t start_task;
2255 enum sci_task_status status = SCI_TASK_FAILURE_INVALID_STATE;
2256
2257 state = scic->parent.state_machine.current_state_id;
2258 start_task = scic_sds_controller_state_handler_table[state].base.start_task;
2259
2260 if (start_task)
2261 status = start_task(&scic->parent,
2262 (struct sci_base_remote_device *)remote_device,
2263 (struct sci_base_request *)task_request,
2264 task_tag);
2265 else
2266 dev_warn(scic_to_dev(scic),
2267 "%s: SCIC Controller starting task from invalid "
2268 "state\n",
2269 __func__);
2270
2271 return status;
2272 }
2273
2274 /* --------------------------------------------------------------------------- */
2275
2276 enum sci_status scic_controller_complete_task(
2277 struct scic_sds_controller *scic,
2278 struct scic_sds_remote_device *remote_device,
2279 struct scic_sds_request *task_request)
2280 {
2281 u32 state;
2282 sci_base_controller_request_handler_t complete_task;
2283 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2284
2285 state = scic->parent.state_machine.current_state_id;
2286 complete_task = scic_sds_controller_state_handler_table[state].base.complete_task;
2287
2288 if (complete_task)
2289 status = complete_task(&scic->parent,
2290 (struct sci_base_remote_device *)remote_device,
2291 (struct sci_base_request *)task_request);
2292 else
2293 dev_warn(scic_to_dev(scic),
2294 "%s: SCIC Controller completing task from invalid "
2295 "state\n",
2296 __func__);
2297
2298 return status;
2299 }
2300
2301
2302 /* --------------------------------------------------------------------------- */
2303
2304 enum sci_status scic_controller_get_port_handle(
2305 struct scic_sds_controller *scic,
2306 u8 port_index,
2307 struct scic_sds_port **port_handle)
2308 {
2309 if (port_index < scic->logical_port_entries) {
2310 *port_handle = &scic->port_table[port_index];
2311
2312 return SCI_SUCCESS;
2313 }
2314
2315 return SCI_FAILURE_INVALID_PORT;
2316 }
2317
2318 /* --------------------------------------------------------------------------- */
2319
2320 enum sci_status scic_controller_get_phy_handle(
2321 struct scic_sds_controller *scic,
2322 u8 phy_index,
2323 struct scic_sds_phy **phy_handle)
2324 {
2325 if (phy_index < ARRAY_SIZE(scic->phy_table)) {
2326 *phy_handle = &scic->phy_table[phy_index];
2327
2328 return SCI_SUCCESS;
2329 }
2330
2331 dev_err(scic_to_dev(scic),
2332 "%s: Controller:0x%p PhyId:0x%x invalid phy index\n",
2333 __func__, scic, phy_index);
2334
2335 return SCI_FAILURE_INVALID_PHY;
2336 }
2337
2338 /* --------------------------------------------------------------------------- */
2339
2340 u16 scic_controller_allocate_io_tag(
2341 struct scic_sds_controller *scic)
2342 {
2343 u16 task_context;
2344 u16 sequence_count;
2345
2346 if (!sci_pool_empty(scic->tci_pool)) {
2347 sci_pool_get(scic->tci_pool, task_context);
2348
2349 sequence_count = scic->io_request_sequence[task_context];
2350
2351 return scic_sds_io_tag_construct(sequence_count, task_context);
2352 }
2353
2354 return SCI_CONTROLLER_INVALID_IO_TAG;
2355 }
2356
2357 /* --------------------------------------------------------------------------- */
2358
2359 enum sci_status scic_controller_free_io_tag(
2360 struct scic_sds_controller *scic,
2361 u16 io_tag)
2362 {
2363 u16 sequence;
2364 u16 index;
2365
2366 BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
2367
2368 sequence = scic_sds_io_tag_get_sequence(io_tag);
2369 index = scic_sds_io_tag_get_index(io_tag);
2370
2371 if (!sci_pool_full(scic->tci_pool)) {
2372 if (sequence == scic->io_request_sequence[index]) {
2373 scic_sds_io_sequence_increment(
2374 scic->io_request_sequence[index]);
2375
2376 sci_pool_put(scic->tci_pool, index);
2377
2378 return SCI_SUCCESS;
2379 }
2380 }
2381
2382 return SCI_FAILURE_INVALID_IO_TAG;
2383 }
2384
2385 /* --------------------------------------------------------------------------- */
2386
/**
 * scic_controller_enable_interrupts() - enable controller interrupts
 * @scic: controller whose interrupt mask register is written.
 *
 * Clears every bit in the SMU interrupt mask register, which (per the
 * function name) unmasks all controller interrupt sources.
 */
void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	/* The SMU register block must already be mapped. */
	BUG_ON(scic->smu_registers == NULL);
	SMU_IMR_WRITE(scic, 0x00000000);
}
2393
2394 /* --------------------------------------------------------------------------- */
2395
/**
 * scic_controller_disable_interrupts() - disable controller interrupts
 * @scic: controller whose interrupt mask register is written.
 *
 * Sets every bit in the SMU interrupt mask register, which (per the
 * function name) masks all controller interrupt sources.
 */
void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	/* The SMU register block must already be mapped. */
	BUG_ON(scic->smu_registers == NULL);
	SMU_IMR_WRITE(scic, 0xffffffff);
}
2402
2403 /* --------------------------------------------------------------------------- */
2404
2405 enum sci_status scic_controller_set_mode(
2406 struct scic_sds_controller *scic,
2407 enum sci_controller_mode operating_mode)
2408 {
2409 enum sci_status status = SCI_SUCCESS;
2410
2411 if ((scic->parent.state_machine.current_state_id ==
2412 SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2413 (scic->parent.state_machine.current_state_id ==
2414 SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2415 switch (operating_mode) {
2416 case SCI_MODE_SPEED:
2417 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
2418 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
2419 scic->uf_control.buffers.count =
2420 SCU_UNSOLICITED_FRAME_COUNT;
2421 scic->completion_event_entries = SCU_EVENT_COUNT;
2422 scic->completion_queue_entries =
2423 SCU_COMPLETION_QUEUE_COUNT;
2424 scic_sds_controller_build_memory_descriptor_table(scic);
2425 break;
2426
2427 case SCI_MODE_SIZE:
2428 scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
2429 scic->task_context_entries = SCI_MIN_IO_REQUESTS;
2430 scic->uf_control.buffers.count =
2431 SCU_MIN_UNSOLICITED_FRAMES;
2432 scic->completion_event_entries = SCU_MIN_EVENTS;
2433 scic->completion_queue_entries =
2434 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2435 scic_sds_controller_build_memory_descriptor_table(scic);
2436 break;
2437
2438 default:
2439 status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2440 break;
2441 }
2442 } else
2443 status = SCI_FAILURE_INVALID_STATE;
2444
2445 return status;
2446 }
2447
2448 /**
2449 * scic_sds_controller_reset_hardware() -
2450 *
2451 * This method will reset the controller hardware.
2452 */
2453 void scic_sds_controller_reset_hardware(
2454 struct scic_sds_controller *scic)
2455 {
2456 /* Disable interrupts so we dont take any spurious interrupts */
2457 scic_controller_disable_interrupts(scic);
2458
2459 /* Reset the SCU */
2460 SMU_SMUSRCR_WRITE(scic, 0xFFFFFFFF);
2461
2462 /* Delay for 1ms to before clearing the CQP and UFQPR. */
2463 scic_cb_stall_execution(1000);
2464
2465 /* The write to the CQGR clears the CQP */
2466 SMU_CQGR_WRITE(scic, 0x00000000);
2467
2468 /* The write to the UFQGP clears the UFQPR */
2469 SCU_UFQGP_WRITE(scic, 0x00000000);
2470 }
2471
2472 /* --------------------------------------------------------------------------- */
2473
2474 enum sci_status scic_user_parameters_set(
2475 struct scic_sds_controller *scic,
2476 union scic_user_parameters *scic_parms)
2477 {
2478 if (
2479 (scic->parent.state_machine.current_state_id
2480 == SCI_BASE_CONTROLLER_STATE_RESET)
2481 || (scic->parent.state_machine.current_state_id
2482 == SCI_BASE_CONTROLLER_STATE_INITIALIZING)
2483 || (scic->parent.state_machine.current_state_id
2484 == SCI_BASE_CONTROLLER_STATE_INITIALIZED)
2485 ) {
2486 u16 index;
2487
2488 /*
2489 * Validate the user parameters. If they are not legal, then
2490 * return a failure. */
2491 for (index = 0; index < SCI_MAX_PHYS; index++) {
2492 if (!
2493 (scic_parms->sds1.phys[index].max_speed_generation
2494 <= SCIC_SDS_PARM_MAX_SPEED
2495 && scic_parms->sds1.phys[index].max_speed_generation
2496 > SCIC_SDS_PARM_NO_SPEED
2497 )
2498 )
2499 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2500 }
2501
2502 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2503
2504 return SCI_SUCCESS;
2505 }
2506
2507 return SCI_FAILURE_INVALID_STATE;
2508 }
2509
2510 /* --------------------------------------------------------------------------- */
2511
/**
 * scic_user_parameters_get() - copy out the current user parameters
 * @scic: controller to query.
 * @scic_parms: destination that receives a copy of the controller's
 *	user parameter union.
 */
void scic_user_parameters_get(
	struct scic_sds_controller *scic,
	union scic_user_parameters *scic_parms)
{
	memcpy(scic_parms, (&scic->user_parameters), sizeof(*scic_parms));
}
2518
2519 /* --------------------------------------------------------------------------- */
2520
2521 enum sci_status scic_oem_parameters_set(
2522 struct scic_sds_controller *scic,
2523 union scic_oem_parameters *scic_parms)
2524 {
2525 if (
2526 (scic->parent.state_machine.current_state_id
2527 == SCI_BASE_CONTROLLER_STATE_RESET)
2528 || (scic->parent.state_machine.current_state_id
2529 == SCI_BASE_CONTROLLER_STATE_INITIALIZING)
2530 || (scic->parent.state_machine.current_state_id
2531 == SCI_BASE_CONTROLLER_STATE_INITIALIZED)
2532 ) {
2533 u16 index;
2534
2535 /*
2536 * Validate the oem parameters. If they are not legal, then
2537 * return a failure. */
2538 for (index = 0; index < SCI_MAX_PORTS; index++) {
2539 if (scic_parms->sds1.ports[index].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) {
2540 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2541 }
2542 }
2543
2544 for (index = 0; index < SCI_MAX_PHYS; index++) {
2545 if (
2546 scic_parms->sds1.phys[index].sas_address.high == 0
2547 && scic_parms->sds1.phys[index].sas_address.low == 0
2548 ) {
2549 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2550 }
2551 }
2552
2553 memcpy(&scic->oem_parameters, scic_parms, sizeof(*scic_parms));
2554 return SCI_SUCCESS;
2555 }
2556
2557 return SCI_FAILURE_INVALID_STATE;
2558 }
2559
2560 /* --------------------------------------------------------------------------- */
2561
/**
 * scic_oem_parameters_get() - copy out the current OEM parameters
 * @scic: controller to query.
 * @scic_parms: destination that receives a copy of the controller's
 *	OEM parameter union.
 */
void scic_oem_parameters_get(
	struct scic_sds_controller *scic,
	union scic_oem_parameters *scic_parms)
{
	memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
}
2568
2569 /* --------------------------------------------------------------------------- */
2570
2571
/* Limits and encode bounds for the SMU interrupt coalescing control. */
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
#define INTERRUPT_COALESCE_NUMBER_MAX 256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28

/**
 * scic_controller_set_interrupt_coalescence() - program interrupt coalescing
 * @scic_controller: controller whose ICC register is programmed.
 * @coalesce_number: interrupt coalesce count; must be <= 256.
 * @coalesce_timeout: coalesce timeout (per the conversion below, the
 *	caller's value is multiplied by 100 to reach 10ns units, so it is
 *	presumably given in microseconds - confirm with callers).
 *
 * Maps @coalesce_timeout onto the hardware's logarithmic timeout encoding
 * (see table below), then writes both fields into the SMU ICC register.
 * Returns SCI_FAILURE_INVALID_PARAMETER_VALUE when either input falls
 * outside the representable range.
 */
enum sci_status scic_controller_set_interrupt_coalescence(
	struct scic_sds_controller *scic_controller,
	u32 coalesce_number,
	u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 * Defined encoding for interrupt coalescing timeout:
	 * Value Min Max Units
	 * ----- --- --- -----
	 * 0 - - Disabled
	 * 1 13.3 20.0 ns
	 * 2 26.7 40.0
	 * 3 53.3 80.0
	 * 4 106.7 160.0
	 * 5 213.3 320.0
	 * 6 426.7 640.0
	 * 7 853.3 1280.0
	 * 8 1.7 2.6 us
	 * 9 3.4 5.1
	 * 10 6.8 10.2
	 * 11 13.7 20.5
	 * 12 27.3 41.0
	 * 13 54.6 81.9
	 * 14 109.2 163.8
	 * 15 218.5 327.7
	 * 16 436.9 655.4
	 * 17 873.8 1310.7
	 * 18 1.7 2.6 ms
	 * 19 3.5 5.2
	 * 20 7.0 10.5
	 * 21 14.0 21.0
	 * 22 28.0 41.9
	 * 23 55.9 83.9
	 * 24 111.8 167.8
	 * 25 223.7 335.5
	 * 26 447.4 671.1
	 * 27 894.8 1342.2
	 * 28 1.8 2.7 s
	 * Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else{
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		      timeout_encode++) {
			/* [min, max) doubles each iteration, mirroring the
			 * table; break once the requested timeout lands in
			 * the current bucket. */
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				/* The value falls in the gap between two
				 * buckets; pick the nearer encode. */
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else{
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	SMU_ICC_WRITE(
		scic_controller,
		(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
		 SMU_ICC_GEN_VAL(TIMER, timeout_encode))
		);

	/* Cache the programmed values (timeout converted back from 10ns
	 * units to the caller's original units). */
	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
2674
2675
2676 struct scic_sds_controller *scic_controller_alloc(struct device *dev)
2677 {
2678 return devm_kzalloc(dev, sizeof(struct scic_sds_controller), GFP_KERNEL);
2679 }
2680
2681 /*
2682 * *****************************************************************************
2683 * * DEFAULT STATE HANDLERS
2684 * ***************************************************************************** */
2685
2686 /**
2687 *
2688 * @controller: This is struct sci_base_controller object which is cast into a
2689 * struct scic_sds_controller object.
2690 * @remote_device: This is struct sci_base_remote_device which, if it was used, would
2691 * be cast to a struct scic_sds_remote_device.
2692 * @io_request: This is the struct sci_base_request which, if it was used, would be
2693 * cast to a SCIC_SDS_IO_REQUEST.
2694 * @io_tag: This is the IO tag to be assigned to the IO request or
2695 * SCI_CONTROLLER_INVALID_IO_TAG.
2696 *
2697 * This method is called when the struct scic_sds_controller default start io/task
2698 * handler is in place. - Issue a warning message enum sci_status
2699 * SCI_FAILURE_INVALID_STATE
2700 */
2701 static enum sci_status scic_sds_controller_default_start_operation_handler(
2702 struct sci_base_controller *controller,
2703 struct sci_base_remote_device *remote_device,
2704 struct sci_base_request *io_request,
2705 u16 io_tag)
2706 {
2707 struct scic_sds_controller *this_controller;
2708
2709 this_controller = (struct scic_sds_controller *)controller;
2710
2711 dev_warn(scic_to_dev(this_controller),
2712 "%s: SCIC Controller requested to start an io/task from "
2713 "invalid state %d\n",
2714 __func__,
2715 sci_base_state_machine_get_state(
2716 scic_sds_controller_get_base_state_machine(
2717 this_controller)));
2718
2719 return SCI_FAILURE_INVALID_STATE;
2720 }
2721
2722 /**
2723 *
2724 * @controller: This is struct sci_base_controller object which is cast into a
2725 * struct scic_sds_controller object.
2726 * @remote_device: This is struct sci_base_remote_device which, if it was used, would
2727 * be cast to a struct scic_sds_remote_device.
2728 * @io_request: This is the struct sci_base_request which, if it was used, would be
2729 * cast to a SCIC_SDS_IO_REQUEST.
2730 *
2731 * This method is called when the struct scic_sds_controller default request handler
2732 * is in place. - Issue a warning message enum sci_status SCI_FAILURE_INVALID_STATE
2733 */
2734 static enum sci_status scic_sds_controller_default_request_handler(
2735 struct sci_base_controller *controller,
2736 struct sci_base_remote_device *remote_device,
2737 struct sci_base_request *io_request)
2738 {
2739 struct scic_sds_controller *this_controller;
2740
2741 this_controller = (struct scic_sds_controller *)controller;
2742
2743 dev_warn(scic_to_dev(this_controller),
2744 "%s: SCIC Controller request operation from invalid state %d\n",
2745 __func__,
2746 sci_base_state_machine_get_state(
2747 scic_sds_controller_get_base_state_machine(
2748 this_controller)));
2749
2750 return SCI_FAILURE_INVALID_STATE;
2751 }
2752
2753 /*
2754 * *****************************************************************************
2755 * * GENERAL (COMMON) STATE HANDLERS
2756 * ***************************************************************************** */
2757
2758 /**
2759 *
2760 * @controller: The struct sci_base_controller object which is cast into a
2761 * struct scic_sds_controller object.
2762 *
2763 * This method is called when the struct scic_sds_controller is in the ready state
2764 * reset handler is in place. - Transition to
2765 * SCI_BASE_CONTROLLER_STATE_RESETTING enum sci_status SCI_SUCCESS
2766 */
2767 static enum sci_status scic_sds_controller_general_reset_handler(
2768 struct sci_base_controller *controller)
2769 {
2770 struct scic_sds_controller *this_controller;
2771
2772 this_controller = (struct scic_sds_controller *)controller;
2773
2774 /*
2775 * The reset operation is not a graceful cleanup just perform the state
2776 * transition. */
2777 sci_base_state_machine_change_state(
2778 scic_sds_controller_get_base_state_machine(this_controller),
2779 SCI_BASE_CONTROLLER_STATE_RESETTING
2780 );
2781
2782 return SCI_SUCCESS;
2783 }
2784
2785 /*
2786 * *****************************************************************************
2787 * * RESET STATE HANDLERS
2788 * ***************************************************************************** */
2789
2790 /**
2791 *
2792 * @controller: This is the struct sci_base_controller object which is cast into a
2793 * struct scic_sds_controller object.
2794 *
2795 * This method is the struct scic_sds_controller initialize handler for the reset
2796 * state. - Currently this function does nothing enum sci_status SCI_FAILURE This
2797 * function is not yet implemented and is a valid request from the reset state.
2798 */
2799 static enum sci_status scic_sds_controller_reset_state_initialize_handler(
2800 struct sci_base_controller *controller)
2801 {
2802 u32 index;
2803 enum sci_status result = SCI_SUCCESS;
2804 struct scic_sds_controller *this_controller;
2805
2806 this_controller = (struct scic_sds_controller *)controller;
2807
2808 sci_base_state_machine_change_state(
2809 scic_sds_controller_get_base_state_machine(this_controller),
2810 SCI_BASE_CONTROLLER_STATE_INITIALIZING
2811 );
2812
2813 this_controller->timeout_timer = scic_cb_timer_create(
2814 this_controller,
2815 (void (*)(void *))scic_sds_controller_timeout_handler,
2816 (void (*)(void *))controller);
2817
2818 scic_sds_controller_initialize_phy_startup(this_controller);
2819
2820 scic_sds_controller_initialize_power_control(this_controller);
2821
2822 /*
2823 * There is nothing to do here for B0 since we do not have to
2824 * program the AFE registers.
2825 * / @todo The AFE settings are supposed to be correct for the B0 but
2826 * / presently they seem to be wrong. */
2827 scic_sds_controller_afe_initialization(this_controller);
2828
2829 if (SCI_SUCCESS == result) {
2830 u32 status;
2831 u32 terminate_loop;
2832
2833 /* Take the hardware out of reset */
2834 SMU_SMUSRCR_WRITE(this_controller, 0x00000000);
2835
2836 /*
2837 * / @todo Provide meaningfull error code for hardware failure
2838 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2839 result = SCI_FAILURE;
2840 terminate_loop = 100;
2841
2842 while (terminate_loop-- && (result != SCI_SUCCESS)) {
2843 /* Loop until the hardware reports success */
2844 scic_cb_stall_execution(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2845 status = SMU_SMUCSR_READ(this_controller);
2846
2847 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) {
2848 result = SCI_SUCCESS;
2849 }
2850 }
2851 }
2852
2853 if (result == SCI_SUCCESS) {
2854 u32 max_supported_ports;
2855 u32 max_supported_devices;
2856 u32 max_supported_io_requests;
2857 u32 device_context_capacity;
2858
2859 /*
2860 * Determine what are the actaul device capacities that the
2861 * hardware will support */
2862 device_context_capacity = SMU_DCC_READ(this_controller);
2863
2864 max_supported_ports =
2865 smu_dcc_get_max_ports(device_context_capacity);
2866 max_supported_devices =
2867 smu_dcc_get_max_remote_node_context(device_context_capacity);
2868 max_supported_io_requests =
2869 smu_dcc_get_max_task_context(device_context_capacity);
2870
2871 /* Make all PEs that are unassigned match up with the logical ports */
2872 for (index = 0; index < max_supported_ports; index++) {
2873 scu_register_write(
2874 this_controller,
2875 this_controller->scu_registers->peg0.ptsg.protocol_engine[index],
2876 index
2877 );
2878 }
2879
2880 /* Record the smaller of the two capacity values */
2881 this_controller->logical_port_entries =
2882 min(max_supported_ports, this_controller->logical_port_entries);
2883
2884 this_controller->task_context_entries =
2885 min(max_supported_io_requests, this_controller->task_context_entries);
2886
2887 this_controller->remote_node_entries =
2888 min(max_supported_devices, this_controller->remote_node_entries);
2889
2890 /*
2891 * Now that we have the correct hardware reported minimum values
2892 * build the MDL for the controller. Default to a performance
2893 * configuration. */
2894 scic_controller_set_mode(this_controller, SCI_MODE_SPEED);
2895 }
2896
2897 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2898 if (result == SCI_SUCCESS) {
2899 u32 dma_configuration;
2900
2901 /* Configure the payload DMA */
2902 dma_configuration = SCU_PDMACR_READ(this_controller);
2903 dma_configuration |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2904 SCU_PDMACR_WRITE(this_controller, dma_configuration);
2905
2906 /* Configure the control DMA */
2907 dma_configuration = SCU_CDMACR_READ(this_controller);
2908 dma_configuration |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2909 SCU_CDMACR_WRITE(this_controller, dma_configuration);
2910 }
2911
2912 /*
2913 * Initialize the PHYs before the PORTs because the PHY registers
2914 * are accessed during the port initialization. */
2915 if (result == SCI_SUCCESS) {
2916 /* Initialize the phys */
2917 for (index = 0;
2918 (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2919 index++) {
2920 result = scic_sds_phy_initialize(
2921 &this_controller->phy_table[index],
2922 &this_controller->scu_registers->peg0.pe[index].ll
2923 );
2924 }
2925 }
2926
2927 if (result == SCI_SUCCESS) {
2928 /* Initialize the logical ports */
2929 for (index = 0;
2930 (index < this_controller->logical_port_entries)
2931 && (result == SCI_SUCCESS);
2932 index++) {
2933 result = scic_sds_port_initialize(
2934 &this_controller->port_table[index],
2935 &this_controller->scu_registers->peg0.pe[index].tl,
2936 &this_controller->scu_registers->peg0.ptsg.port[index],
2937 &this_controller->scu_registers->peg0.ptsg.protocol_engine,
2938 &this_controller->scu_registers->peg0.viit[index]
2939 );
2940 }
2941 }
2942
2943 if (SCI_SUCCESS == result) {
2944 result = scic_sds_port_configuration_agent_initialize(
2945 this_controller,
2946 &this_controller->port_agent
2947 );
2948 }
2949
2950 /* Advance the controller state machine */
2951 if (result == SCI_SUCCESS) {
2952 sci_base_state_machine_change_state(
2953 scic_sds_controller_get_base_state_machine(this_controller),
2954 SCI_BASE_CONTROLLER_STATE_INITIALIZED
2955 );
2956 } else {
2957 sci_base_state_machine_change_state(
2958 scic_sds_controller_get_base_state_machine(this_controller),
2959 SCI_BASE_CONTROLLER_STATE_FAILED
2960 );
2961 }
2962
2963 return result;
2964 }
2965
2966 /*
2967 * *****************************************************************************
2968 * * INITIALIZED STATE HANDLERS
2969 * ***************************************************************************** */
2970
2971 /**
2972 *
2973 * @controller: This is the struct sci_base_controller object which is cast into a
2974 * struct scic_sds_controller object.
2975 * @timeout: This is the allowed time for the controller object to reach the
2976 * started state.
2977 *
2978 * This method is the struct scic_sds_controller start handler for the initialized
2979 * state. - Validate we have a good memory descriptor table - Initialze the
2980 * physical memory before programming the hardware - Program the SCU hardware
2981 * with the physical memory addresses passed in the memory descriptor table. -
2982 * Initialzie the TCi pool - Initialize the RNi pool - Initialize the
2983 * completion queue - Initialize the unsolicited frame data - Take the SCU port
2984 * task scheduler out of reset - Start the first phy object. - Transition to
2985 * SCI_BASE_CONTROLLER_STATE_STARTING. enum sci_status SCI_SUCCESS if all of the
2986 * controller start operations complete
2987 * SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD if one or more of the memory
2988 * descriptor fields is invalid.
2989 */
2990 static enum sci_status scic_sds_controller_initialized_state_start_handler(
2991 struct sci_base_controller *controller,
2992 u32 timeout)
2993 {
2994 u16 index;
2995 enum sci_status result;
2996 struct scic_sds_controller *this_controller;
2997
2998 this_controller = (struct scic_sds_controller *)controller;
2999
3000 /* Make sure that the SCI User filled in the memory descriptor table correctly */
3001 result = scic_sds_controller_validate_memory_descriptor_table(this_controller);
3002
3003 if (result == SCI_SUCCESS) {
3004 /* The memory descriptor list looks good so program the hardware */
3005 scic_sds_controller_ram_initialization(this_controller);
3006 }
3007
3008 if (SCI_SUCCESS == result) {
3009 /* Build the TCi free pool */
3010 sci_pool_initialize(this_controller->tci_pool);
3011 for (index = 0; index < this_controller->task_context_entries; index++) {
3012 sci_pool_put(this_controller->tci_pool, index);
3013 }
3014
3015 /* Build the RNi free pool */
3016 scic_sds_remote_node_table_initialize(
3017 &this_controller->available_remote_nodes,
3018 this_controller->remote_node_entries
3019 );
3020 }
3021
3022 if (SCI_SUCCESS == result) {
3023 /*
3024 * Before anything else lets make sure we will not be interrupted
3025 * by the hardware. */
3026 scic_controller_disable_interrupts(this_controller);
3027
3028 /* Enable the port task scheduler */
3029 scic_sds_controller_enable_port_task_scheduler(this_controller);
3030
3031 /* Assign all the task entries to this controller physical function */
3032 scic_sds_controller_assign_task_entries(this_controller);
3033
3034 /* Now initialze the completion queue */
3035 scic_sds_controller_initialize_completion_queue(this_controller);
3036
3037 /* Initialize the unsolicited frame queue for use */
3038 scic_sds_controller_initialize_unsolicited_frame_queue(this_controller);
3039 }
3040
3041 if (SCI_SUCCESS == result) {
3042 scic_sds_controller_start_next_phy(this_controller);
3043
3044 scic_cb_timer_start(this_controller,
3045 this_controller->timeout_timer,
3046 timeout);
3047
3048 sci_base_state_machine_change_state(
3049 scic_sds_controller_get_base_state_machine(this_controller),
3050 SCI_BASE_CONTROLLER_STATE_STARTING
3051 );
3052 }
3053
3054 return result;
3055 }
3056
3057 /*
3058 * *****************************************************************************
3059 * * INITIALIZED STATE HANDLERS
3060 * ***************************************************************************** */
3061
3062 /**
3063 *
3064 * @controller: This is struct scic_sds_controller which receives the link up
3065 * notification.
3066 * @port: This is struct scic_sds_port with which the phy is associated.
3067 * @phy: This is the struct scic_sds_phy which has gone link up.
3068 *
3069 * This method is called when the struct scic_sds_controller is in the starting state
3070 * link up handler is called. This method will perform the following: - Stop
3071 * the phy timer - Start the next phy - Report the link up condition to the
3072 * port object none
3073 */
3074 static void scic_sds_controller_starting_state_link_up_handler(
3075 struct scic_sds_controller *this_controller,
3076 struct scic_sds_port *port,
3077 struct scic_sds_phy *phy)
3078 {
3079 scic_sds_controller_phy_timer_stop(this_controller);
3080
3081 this_controller->port_agent.link_up_handler(
3082 this_controller, &this_controller->port_agent, port, phy
3083 );
3084 /* scic_sds_port_link_up(port, phy); */
3085
3086 scic_sds_controller_start_next_phy(this_controller);
3087 }
3088
3089 /**
3090 *
3091 * @controller: This is struct scic_sds_controller which receives the link down
3092 * notification.
3093 * @port: This is struct scic_sds_port with which the phy is associated.
3094 * @phy: This is the struct scic_sds_phy which has gone link down.
3095 *
3096 * This method is called when the struct scic_sds_controller is in the starting state
3097 * link down handler is called. - Report the link down condition to the port
3098 * object none
3099 */
3100 static void scic_sds_controller_starting_state_link_down_handler(
3101 struct scic_sds_controller *this_controller,
3102 struct scic_sds_port *port,
3103 struct scic_sds_phy *phy)
3104 {
3105 this_controller->port_agent.link_down_handler(
3106 this_controller, &this_controller->port_agent, port, phy
3107 );
3108 /* scic_sds_port_link_down(port, phy); */
3109 }
3110
3111 /*
3112 * *****************************************************************************
3113 * * READY STATE HANDLERS
3114 * ***************************************************************************** */
3115
3116 /**
3117 *
3118 * @controller: The struct sci_base_controller object which is cast into a
3119 * struct scic_sds_controller object.
3120 * @timeout: The timeout for when the stop operation should report a failure.
3121 *
3122 * This method is called when the struct scic_sds_controller is in the ready state
3123 * stop handler is called. - Start the timeout timer - Transition to
3124 * SCI_BASE_CONTROLLER_STATE_STOPPING. enum sci_status SCI_SUCCESS
3125 */
3126 static enum sci_status scic_sds_controller_ready_state_stop_handler(
3127 struct sci_base_controller *controller,
3128 u32 timeout)
3129 {
3130 struct scic_sds_controller *this_controller;
3131
3132 this_controller = (struct scic_sds_controller *)controller;
3133
3134 scic_cb_timer_start(this_controller,
3135 this_controller->timeout_timer,
3136 timeout);
3137
3138 sci_base_state_machine_change_state(
3139 scic_sds_controller_get_base_state_machine(this_controller),
3140 SCI_BASE_CONTROLLER_STATE_STOPPING
3141 );
3142
3143 return SCI_SUCCESS;
3144 }
3145
3146 /**
3147 *
3148 * @controller: This is struct sci_base_controller object which is cast into a
3149 * struct scic_sds_controller object.
3150 * @remote_device: This is struct sci_base_remote_device which is cast to a
3151 * struct scic_sds_remote_device object.
3152 * @io_request: This is the struct sci_base_request which is cast to a
3153 * SCIC_SDS_IO_REQUEST object.
3154 * @io_tag: This is the IO tag to be assigned to the IO request or
3155 * SCI_CONTROLLER_INVALID_IO_TAG.
3156 *
3157 * This method is called when the struct scic_sds_controller is in the ready state and
3158 * the start io handler is called. - Start the io request on the remote device
3159 * - if successful - assign the io_request to the io_request_table - post the
3160 * request to the hardware enum sci_status SCI_SUCCESS if the start io operation
3161 * succeeds SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could not be
3162 * allocated for the io request. SCI_FAILURE_INVALID_STATE if one or more
3163 * objects are not in a valid state to accept io requests. How does the io_tag
3164 * parameter get assigned to the io request?
3165 */
3166 static enum sci_status scic_sds_controller_ready_state_start_io_handler(
3167 struct sci_base_controller *controller,
3168 struct sci_base_remote_device *remote_device,
3169 struct sci_base_request *io_request,
3170 u16 io_tag)
3171 {
3172 enum sci_status status;
3173
3174 struct scic_sds_controller *this_controller;
3175 struct scic_sds_request *the_request;
3176 struct scic_sds_remote_device *the_device;
3177
3178 this_controller = (struct scic_sds_controller *)controller;
3179 the_request = (struct scic_sds_request *)io_request;
3180 the_device = (struct scic_sds_remote_device *)remote_device;
3181
3182 status = scic_sds_remote_device_start_io(this_controller, the_device, the_request);
3183
3184 if (status == SCI_SUCCESS) {
3185 this_controller->io_request_table[
3186 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3187
3188 scic_sds_controller_post_request(
3189 this_controller,
3190 scic_sds_request_get_post_context(the_request)
3191 );
3192 }
3193
3194 return status;
3195 }
3196
3197 /**
3198 *
3199 * @controller: This is struct sci_base_controller object which is cast into a
3200 * struct scic_sds_controller object.
3201 * @remote_device: This is struct sci_base_remote_device which is cast to a
3202 * struct scic_sds_remote_device object.
3203 * @io_request: This is the struct sci_base_request which is cast to a
3204 * SCIC_SDS_IO_REQUEST object.
3205 *
3206 * This method is called when the struct scic_sds_controller is in the ready state and
3207 * the complete io handler is called. - Complete the io request on the remote
3208 * device - if successful - remove the io_request to the io_request_table
3209 * enum sci_status SCI_SUCCESS if the start io operation succeeds
3210 * SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid state to
3211 * accept io requests.
3212 */
3213 static enum sci_status scic_sds_controller_ready_state_complete_io_handler(
3214 struct sci_base_controller *controller,
3215 struct sci_base_remote_device *remote_device,
3216 struct sci_base_request *io_request)
3217 {
3218 u16 index;
3219 enum sci_status status;
3220 struct scic_sds_controller *this_controller;
3221 struct scic_sds_request *the_request;
3222 struct scic_sds_remote_device *the_device;
3223
3224 this_controller = (struct scic_sds_controller *)controller;
3225 the_request = (struct scic_sds_request *)io_request;
3226 the_device = (struct scic_sds_remote_device *)remote_device;
3227
3228 status = scic_sds_remote_device_complete_io(
3229 this_controller, the_device, the_request);
3230
3231 if (status == SCI_SUCCESS) {
3232 index = scic_sds_io_tag_get_index(the_request->io_tag);
3233 this_controller->io_request_table[index] = NULL;
3234 }
3235
3236 return status;
3237 }
3238
3239 /**
3240 *
3241 * @controller: This is struct sci_base_controller object which is cast into a
3242 * struct scic_sds_controller object.
3243 * @remote_device: This is struct sci_base_remote_device which is cast to a
3244 * struct scic_sds_remote_device object.
3245 * @io_request: This is the struct sci_base_request which is cast to a
3246 * SCIC_SDS_IO_REQUEST object.
3247 *
3248 * This method is called when the struct scic_sds_controller is in the ready state and
3249 * the continue io handler is called. enum sci_status
3250 */
3251 static enum sci_status scic_sds_controller_ready_state_continue_io_handler(
3252 struct sci_base_controller *controller,
3253 struct sci_base_remote_device *remote_device,
3254 struct sci_base_request *io_request)
3255 {
3256 struct scic_sds_controller *this_controller;
3257 struct scic_sds_request *the_request;
3258
3259 the_request = (struct scic_sds_request *)io_request;
3260 this_controller = (struct scic_sds_controller *)controller;
3261
3262 this_controller->io_request_table[
3263 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3264
3265 scic_sds_controller_post_request(
3266 this_controller,
3267 scic_sds_request_get_post_context(the_request)
3268 );
3269
3270 return SCI_SUCCESS;
3271 }
3272
3273 /**
3274 *
3275 * @controller: This is struct sci_base_controller object which is cast into a
3276 * struct scic_sds_controller object.
3277 * @remote_device: This is struct sci_base_remote_device which is cast to a
3278 * struct scic_sds_remote_device object.
3279 * @io_request: This is the struct sci_base_request which is cast to a
3280 * SCIC_SDS_IO_REQUEST object.
3281 * @task_tag: This is the task tag to be assigned to the task request or
3282 * SCI_CONTROLLER_INVALID_IO_TAG.
3283 *
3284 * This method is called when the struct scic_sds_controller is in the ready state and
3285 * the start task handler is called. - The remote device is requested to start
3286 * the task request - if successful - assign the task to the io_request_table -
3287 * post the request to the SCU hardware enum sci_status SCI_SUCCESS if the start io
3288 * operation succeeds SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could
3289 * not be allocated for the io request. SCI_FAILURE_INVALID_STATE if one or
3290 * more objects are not in a valid state to accept io requests. How does the io
3291 * tag get assigned in this code path?
3292 */
3293 static enum sci_status scic_sds_controller_ready_state_start_task_handler(
3294 struct sci_base_controller *controller,
3295 struct sci_base_remote_device *remote_device,
3296 struct sci_base_request *io_request,
3297 u16 task_tag)
3298 {
3299 struct scic_sds_controller *this_controller = (struct scic_sds_controller *)
3300 controller;
3301 struct scic_sds_request *the_request = (struct scic_sds_request *)
3302 io_request;
3303 struct scic_sds_remote_device *the_device = (struct scic_sds_remote_device *)
3304 remote_device;
3305 enum sci_status status;
3306
3307 status = scic_sds_remote_device_start_task(
3308 this_controller, the_device, the_request
3309 );
3310
3311 if (status == SCI_SUCCESS) {
3312 this_controller->io_request_table[
3313 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3314
3315 scic_sds_controller_post_request(
3316 this_controller,
3317 scic_sds_request_get_post_context(the_request)
3318 );
3319 } else if (status == SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS) {
3320 this_controller->io_request_table[
3321 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3322
3323 /*
3324 * We will let framework know this task request started successfully,
3325 * although core is still woring on starting the request (to post tc when
3326 * RNC is resumed.) */
3327 status = SCI_SUCCESS;
3328 }
3329 return status;
3330 }
3331
3332 /**
3333 *
3334 * @controller: This is struct sci_base_controller object which is cast into a
3335 * struct scic_sds_controller object.
3336 * @remote_device: This is struct sci_base_remote_device which is cast to a
3337 * struct scic_sds_remote_device object.
3338 * @io_request: This is the struct sci_base_request which is cast to a
3339 * SCIC_SDS_IO_REQUEST object.
3340 *
3341 * This method is called when the struct scic_sds_controller is in the ready state and
3342 * the terminate request handler is called. - call the io request terminate
3343 * function - if successful - post the terminate request to the SCU hardware
3344 * enum sci_status SCI_SUCCESS if the start io operation succeeds
3345 * SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid state to
3346 * accept io requests.
3347 */
3348 static enum sci_status scic_sds_controller_ready_state_terminate_request_handler(
3349 struct sci_base_controller *controller,
3350 struct sci_base_remote_device *remote_device,
3351 struct sci_base_request *io_request)
3352 {
3353 struct scic_sds_controller *this_controller = (struct scic_sds_controller *)
3354 controller;
3355 struct scic_sds_request *the_request = (struct scic_sds_request *)
3356 io_request;
3357 enum sci_status status;
3358
3359 status = scic_sds_io_request_terminate(the_request);
3360 if (status == SCI_SUCCESS) {
3361 /*
3362 * Utilize the original post context command and or in the POST_TC_ABORT
3363 * request sub-type. */
3364 scic_sds_controller_post_request(
3365 this_controller,
3366 scic_sds_request_get_post_context(the_request)
3367 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT
3368 );
3369 }
3370
3371 return status;
3372 }
3373
3374 /**
3375 *
3376 * @controller: This is struct scic_sds_controller which receives the link up
3377 * notification.
3378 * @port: This is struct scic_sds_port with which the phy is associated.
3379 * @phy: This is the struct scic_sds_phy which has gone link up.
3380 *
3381 * This method is called when the struct scic_sds_controller is in the starting state
3382 * link up handler is called. This method will perform the following: - Stop
3383 * the phy timer - Start the next phy - Report the link up condition to the
3384 * port object none
3385 */
3386 static void scic_sds_controller_ready_state_link_up_handler(
3387 struct scic_sds_controller *this_controller,
3388 struct scic_sds_port *port,
3389 struct scic_sds_phy *phy)
3390 {
3391 this_controller->port_agent.link_up_handler(
3392 this_controller, &this_controller->port_agent, port, phy
3393 );
3394 }
3395
3396 /**
3397 *
3398 * @controller: This is struct scic_sds_controller which receives the link down
3399 * notification.
3400 * @port: This is struct scic_sds_port with which the phy is associated.
3401 * @phy: This is the struct scic_sds_phy which has gone link down.
3402 *
3403 * This method is called when the struct scic_sds_controller is in the starting state
3404 * link down handler is called. - Report the link down condition to the port
3405 * object none
3406 */
3407 static void scic_sds_controller_ready_state_link_down_handler(
3408 struct scic_sds_controller *this_controller,
3409 struct scic_sds_port *port,
3410 struct scic_sds_phy *phy)
3411 {
3412 this_controller->port_agent.link_down_handler(
3413 this_controller, &this_controller->port_agent, port, phy
3414 );
3415 }
3416
3417 /*
3418 * *****************************************************************************
3419 * * STOPPING STATE HANDLERS
3420 * ***************************************************************************** */
3421
3422 /**
3423 *
3424 * @controller: This is struct sci_base_controller object which is cast into a
3425 * struct scic_sds_controller object.
3426 * @remote_device: This is struct sci_base_remote_device which is cast to a
3427 * struct scic_sds_remote_device object.
3428 * @io_request: This is the struct sci_base_request which is cast to a
3429 * SCIC_SDS_IO_REQUEST object.
3430 *
3431 * This method is called when the struct scic_sds_controller is in a stopping state
3432 * and the complete io handler is called. - This function is not yet
3433 * implemented enum sci_status SCI_FAILURE
3434 */
3435 static enum sci_status scic_sds_controller_stopping_state_complete_io_handler(
3436 struct sci_base_controller *controller,
3437 struct sci_base_remote_device *remote_device,
3438 struct sci_base_request *io_request)
3439 {
3440 struct scic_sds_controller *this_controller;
3441
3442 this_controller = (struct scic_sds_controller *)controller;
3443
3444 /* / @todo Implement this function */
3445 return SCI_FAILURE;
3446 }
3447
3448 /**
3449 *
3450 * @controller: This is struct sci_base_controller object which is cast into a
3451 * struct scic_sds_controller object.
3452 * @remote_device: This is struct sci_base_remote_device which is cast to a
3453 * struct scic_sds_remote_device object.
3454 * @io_request: This is the struct sci_base_request which is cast to a
3455 * SCIC_SDS_IO_REQUEST object.
3456 *
3457 * This method is called when the struct scic_sds_controller is in a stopping state
3458 * and the complete task handler is called. - This function is not yet
3459 * implemented enum sci_status SCI_FAILURE
3460 */
3461
3462 /*
3463 * *****************************************************************************
3464 * * STOPPED STATE HANDLERS
3465 * ***************************************************************************** */
3466
3467 /*
3468 * *****************************************************************************
3469 * * FAILED STATE HANDLERS
3470 * ***************************************************************************** */
3471
/*
 * scic_sds_controller_state_handler_table - maps each controller state to the
 * set of operation handlers that are legal while in that state.  Operations
 * that are not valid in a given state are routed to the default handlers,
 * which reject the request.  Indexed by SCI_BASE_CONTROLLER_STATE_*.
 */
const struct scic_sds_controller_state_handler scic_sds_controller_state_handler_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	/* RESET is the only state in this table that installs an initialize
	 * handler. */
	[SCI_BASE_CONTROLLER_STATE_RESET] = {
		.base.initialize = scic_sds_controller_reset_state_initialize_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	/* INITIALIZED is the only state in this table that installs a start
	 * handler. */
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {
		.base.start = scic_sds_controller_initialized_state_start_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
		.link_up = scic_sds_controller_starting_state_link_up_handler,
		.link_down = scic_sds_controller_starting_state_link_down_handler
	},
	/* READY is the only state in this table that accepts new io and task
	 * requests. */
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.base.stop = scic_sds_controller_ready_state_stop_handler,
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_ready_state_start_io_handler,
		.base.complete_io = scic_sds_controller_ready_state_complete_io_handler,
		.base.continue_io = scic_sds_controller_ready_state_continue_io_handler,
		.base.start_task = scic_sds_controller_ready_state_start_task_handler,
		.base.complete_task = scic_sds_controller_ready_state_complete_io_handler,
		.terminate_request = scic_sds_controller_ready_state_terminate_request_handler,
		.link_up = scic_sds_controller_ready_state_link_up_handler,
		.link_down = scic_sds_controller_ready_state_link_down_handler
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_stopping_state_complete_io_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
};
3546
3547 /**
3548 *
3549 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3550 * object.
3551 *
3552 * This method implements the actions taken by the struct scic_sds_controller on entry
3553 * to the SCI_BASE_CONTROLLER_STATE_INITIAL. - Set the state handlers to the
3554 * controllers initial state. none This function should initialze the
3555 * controller object.
3556 */
3557 static void scic_sds_controller_initial_state_enter(
3558 struct sci_base_object *object)
3559 {
3560 struct scic_sds_controller *this_controller;
3561
3562 this_controller = (struct scic_sds_controller *)object;
3563
3564 sci_base_state_machine_change_state(
3565 &this_controller->parent.state_machine, SCI_BASE_CONTROLLER_STATE_RESET);
3566 }
3567
3568 /**
3569 *
3570 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3571 * object.
3572 *
3573 * This method implements the actions taken by the struct scic_sds_controller on exit
3574 * from the SCI_BASE_CONTROLLER_STATE_STARTING. - This function stops the
3575 * controller starting timeout timer. none
3576 */
3577 static void scic_sds_controller_starting_state_exit(
3578 struct sci_base_object *object)
3579 {
3580 struct scic_sds_controller *scic = (struct scic_sds_controller *)object;
3581
3582 scic_cb_timer_stop(scic, scic->timeout_timer);
3583 }
3584
3585 /**
3586 *
3587 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3588 * object.
3589 *
3590 * This method implements the actions taken by the struct scic_sds_controller on entry
3591 * to the SCI_BASE_CONTROLLER_STATE_READY. - Set the state handlers to the
3592 * controllers ready state. none
3593 */
3594 static void scic_sds_controller_ready_state_enter(
3595 struct sci_base_object *object)
3596 {
3597 struct scic_sds_controller *this_controller;
3598
3599 this_controller = (struct scic_sds_controller *)object;
3600
3601 /* set the default interrupt coalescence number and timeout value. */
3602 scic_controller_set_interrupt_coalescence(
3603 this_controller, 0x10, 250);
3604 }
3605
3606 /**
3607 *
3608 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3609 * object.
3610 *
3611 * This method implements the actions taken by the struct scic_sds_controller on exit
3612 * from the SCI_BASE_CONTROLLER_STATE_READY. - This function does nothing. none
3613 */
3614 static void scic_sds_controller_ready_state_exit(
3615 struct sci_base_object *object)
3616 {
3617 struct scic_sds_controller *this_controller;
3618
3619 this_controller = (struct scic_sds_controller *)object;
3620
3621 /* disable interrupt coalescence. */
3622 scic_controller_set_interrupt_coalescence(this_controller, 0, 0);
3623 }
3624
3625 /**
3626 *
3627 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3628 * object.
3629 *
3630 * This method implements the actions taken by the struct scic_sds_controller on entry
3631 * to the SCI_BASE_CONTROLLER_STATE_READY. - Set the state handlers to the
3632 * controllers ready state. - Stop the phys on this controller - Stop the ports
3633 * on this controller - Stop all of the remote devices on this controller none
3634 */
3635 static void scic_sds_controller_stopping_state_enter(
3636 struct sci_base_object *object)
3637 {
3638 struct scic_sds_controller *this_controller;
3639
3640 this_controller = (struct scic_sds_controller *)object;
3641
3642 /* Stop all of the components for this controller */
3643 scic_sds_controller_stop_phys(this_controller);
3644 scic_sds_controller_stop_ports(this_controller);
3645 scic_sds_controller_stop_devices(this_controller);
3646 }
3647
3648 /**
3649 *
3650 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3651 * object.
3652 *
3653 * This method implements the actions taken by the struct scic_sds_controller on exit
3654 * from the SCI_BASE_CONTROLLER_STATE_STOPPING. - This function stops the
3655 * controller stopping timeout timer. none
3656 */
3657 static void scic_sds_controller_stopping_state_exit(
3658 struct sci_base_object *object)
3659 {
3660 struct scic_sds_controller *this_controller;
3661
3662 this_controller = (struct scic_sds_controller *)object;
3663
3664 scic_cb_timer_stop(this_controller, this_controller->timeout_timer);
3665 }
3666
3667 /**
3668 *
3669 * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3670 * object.
3671 *
3672 * This method implements the actions taken by the struct scic_sds_controller on entry
3673 * to the SCI_BASE_CONTROLLER_STATE_RESETTING. - Set the state handlers to the
3674 * controllers resetting state. - Write to the SCU hardware reset register to
3675 * force a reset - Transition to the SCI_BASE_CONTROLLER_STATE_RESET none
3676 */
3677 static void scic_sds_controller_resetting_state_enter(
3678 struct sci_base_object *object)
3679 {
3680 struct scic_sds_controller *this_controller;
3681
3682 this_controller = (struct scic_sds_controller *)object;
3683
3684 scic_sds_controller_reset_hardware(this_controller);
3685
3686 sci_base_state_machine_change_state(
3687 scic_sds_controller_get_base_state_machine(this_controller),
3688 SCI_BASE_CONTROLLER_STATE_RESET
3689 );
3690 }
3691
3692 /* --------------------------------------------------------------------------- */
3693
/*
 * scic_sds_controller_state_table - enter/exit actions for each controller
 * state, indexed by SCI_BASE_CONTROLLER_STATE_*.  States with an empty
 * initializer have no entry or exit action.
 */
const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.exit_state = scic_sds_controller_starting_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state = scic_sds_controller_ready_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};
3718