/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
        (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
struct iq_post_status {
        int status;
        int index;
};
static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
        struct octeon_instr_queue *iq =
            (struct octeon_instr_queue *)oct->instr_queue[iq_no];
        return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
/* Define this to return the request status compatible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
                            union oct_txpciq txpciq,
                            u32 num_descs)
{
        struct octeon_instr_queue *iq;
        struct octeon_iq_config *conf = NULL;
        u32 iq_no = (u32)txpciq.s.q_no;
        u32 q_size;
        struct cavium_wq *db_wq;
        int orig_node = dev_to_node(&oct->pci_dev->dev);
        int numa_node = cpu_to_node(iq_no % num_online_cpus());

        if (OCTEON_CN6XXX(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
        else if (OCTEON_CN23XX_PF(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
        if (!conf) {
                dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
                        oct->chip_id);
                return 1;
        }

        if (num_descs & (num_descs - 1)) {
                dev_err(&oct->pci_dev->dev,
                        "Number of descriptors for instr queue %d not in power of 2.\n",
                        iq_no);
                return 1;
        }

        q_size = (u32)conf->instr_type * num_descs;

        iq = oct->instr_queue[iq_no];

        set_dev_node(&oct->pci_dev->dev, numa_node);
        iq->base_addr = lio_dma_alloc(oct, q_size,
                                      (dma_addr_t *)&iq->base_addr_dma);
        set_dev_node(&oct->pci_dev->dev, orig_node);
        if (!iq->base_addr)
                iq->base_addr = lio_dma_alloc(oct, q_size,
                                              (dma_addr_t *)&iq->base_addr_dma);
        if (!iq->base_addr) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
                        iq_no);
                return 1;
        }

        iq->max_count = num_descs;

        /* Initialize a list to hold requests that have been posted to Octeon
         * but have yet to be fetched by octeon
         */
        iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
                                        numa_node);
        if (!iq->request_list)
                iq->request_list = vmalloc(sizeof(*iq->request_list) *
                                           num_descs);
        if (!iq->request_list) {
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
                        iq_no);
                return 1;
        }

        memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

        dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
                iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

        iq->txpciq.u64 = txpciq.u64;
        iq->fill_threshold = (u32)conf->db_min;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octeon_read_index = 0;
        iq->flush_index = 0;
        iq->last_db_time = 0;
        iq->do_auto_flush = 1;
        iq->db_timeout = (u32)conf->db_timeout;
        atomic_set(&iq->instr_pending, 0);

        /* Initialize the spinlock for this instruction queue */
        spin_lock_init(&iq->lock);
        spin_lock_init(&iq->post_lock);

        spin_lock_init(&iq->iq_flush_running_lock);

        oct->io_qmask.iq |= (1ULL << iq_no);

        /* Set the 32B/64B mode for each input queue */
        oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
        iq->iqcmd_64B = (conf->instr_type == 64);

        oct->fn_list.setup_iq_regs(oct, iq_no);

        oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
                                                     WQ_MEM_RECLAIM, 0);
        if (!oct->check_db_wq[iq_no].wq) {
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
                        iq_no);
                return 1;
        }

        db_wq = &oct->check_db_wq[iq_no];

        INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
        db_wq->wk.ctxptr = oct;
        db_wq->wk.ctxul = iq_no;
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

        return 0;
}
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
        u64 desc_size = 0, q_size;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
        destroy_workqueue(oct->check_db_wq[iq_no].wq);

        if (OCTEON_CN6XXX(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
        else if (OCTEON_CN23XX_PF(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));

        vfree(iq->request_list);

        if (iq->base_addr) {
                q_size = iq->max_count * desc_size;
                lio_dma_free(oct, (u32)q_size, iq->base_addr,
                             iq->base_addr_dma);
                return 0;
        }
        return 1;
}
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
                    int ifidx,
                    int q_index,
                    union oct_txpciq txpciq,
                    u32 num_descs,
                    void *app_ctx)
{
        u32 iq_no = (u32)txpciq.s.q_no;
        int numa_node = cpu_to_node(iq_no % num_online_cpus());

        if (oct->instr_queue[iq_no]) {
                dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
                        iq_no);
                oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
                oct->instr_queue[iq_no]->app_ctx = app_ctx;
                return 0;
        }
        oct->instr_queue[iq_no] =
            vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
        if (!oct->instr_queue[iq_no])
                oct->instr_queue[iq_no] =
                    vmalloc(sizeof(struct octeon_instr_queue));
        if (!oct->instr_queue[iq_no])
                return 1;

        memset(oct->instr_queue[iq_no], 0,
               sizeof(struct octeon_instr_queue));

        oct->instr_queue[iq_no]->q_index = q_index;
        oct->instr_queue[iq_no]->app_ctx = app_ctx;
        oct->instr_queue[iq_no]->ifidx = ifidx;

        if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
                vfree(oct->instr_queue[iq_no]);
                oct->instr_queue[iq_no] = NULL;
                return 1;
        }

        oct->num_iqs++;
        oct->fn_list.enable_io_queues(oct);
        return 0;
}
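/*
 * Usage sketch (hypothetical caller, not from this file): the per-interface
 * setup code is expected to create one IQ per tx queue roughly like this,
 * where the txpciq encoding and "lio->ifidx" come from the caller:
 *
 *      union oct_txpciq txpciq;
 *
 *      txpciq.u64 = 0;
 *      txpciq.s.q_no = q_no;
 *      if (octeon_setup_iq(oct, lio->ifidx, q_no, txpciq, num_tx_descs,
 *                          netdev_get_tx_queue(netdev, q_no)))
 *              return -ENOMEM;
 *
 * A failure here leaves oct->instr_queue[q_no] == NULL, so the caller can
 * retry or tear down without tracking extra state.
 */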
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
        int i, retry = 1000, pending, instr_cnt = 0;

        do {
                instr_cnt = 0;

                /*for (i = 0; i < oct->num_iqs; i++) {*/
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & (1ULL << i)))
                                continue;
                        pending =
                            atomic_read(&oct->instr_queue[i]->instr_pending);
                        if (pending)
                                __check_db_timeout(oct, i);
                        instr_cnt += pending;
                }

                if (instr_cnt == 0)
                        break;

                schedule_timeout_uninterruptible(1);

        } while (retry-- && instr_cnt);

        return instr_cnt;
}
static void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
        if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
                writel(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                mmiowb();
                iq->fill_cnt = 0;
                iq->last_db_time = jiffies;
        }
}
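/*
 * Note (added commentary): fill_cnt accumulates the number of commands
 * copied into the ring since the last doorbell write; ring_doorbell()
 * tells Octeon how many new 32B/64B slots to fetch and then clears the
 * count. A minimal sketch of the post/ring pairing, assuming the post
 * lock is already held as in octeon_send_command():
 *
 *      st = __post_command2(iq, cmd);  // copies cmd, bumps iq->fill_cnt
 *      if (st.status != IQ_SEND_FAILED && force_db)
 *              ring_doorbell(oct, iq); // writel(fill_cnt), fill_cnt = 0
 */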
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
                                      u8 *cmd)
{
        u8 *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr   = iq->base_addr + (cmdsize * iq->host_write_index);

        memcpy(iqptr, cmd, cmdsize);
}
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
        struct iq_post_status st;

        st.status = IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if queue gets full before Octeon could fetch any instr.
         */
        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
                st.status = IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
                st.status = IQ_SEND_STOP;

        __copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        wmb();

        atomic_inc(&iq->instr_pending);

        return st;
}
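/*
 * Worked example (added for clarity): with max_count = 512 the ring never
 * holds more than 511 outstanding commands. If instr_pending is already
 * 511 (max_count - 1) the post fails with IQ_SEND_FAILED, because accepting
 * it would let host_write_index wrap onto a slot Octeon has not fetched
 * yet. If instr_pending reaches 510 (max_count - 2) the command is still
 * accepted but IQ_SEND_STOP tells the caller to stop its tx queue before
 * the next post would fail.
 */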
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
                                void (*fn)(void *))
{
        if (reqtype > REQTYPE_LAST) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
                        __func__, reqtype);
                return -EINVAL;
        }

        reqtype_free_fn[oct->octeon_id][reqtype] = fn;

        return 0;
}
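/*
 * Usage sketch (illustrative; the callback names are placeholders): buffer
 * owners are expected to register their destructors once per device during
 * setup, e.g.:
 *
 *      octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET,
 *                                      free_netbuf);
 *      octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET_SG,
 *                                      free_netsgbuf);
 *      octeon_register_reqtype_free_fn(oct, REQTYPE_RESP_NET_SG,
 *                                      free_netsgbuf_with_resp);
 *
 * lio_process_iq_request_list() then invokes the matching entry from
 * reqtype_free_fn[][] when it retires a command of that type.
 */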
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
                      int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf     = buf;
        iq->request_list[idx].reqtype = reqtype;
}
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
                            struct octeon_instr_queue *iq, u32 napi_budget)
{
        int reqtype;
        void *buf;
        u32 old = iq->flush_index;
        u32 inst_count = 0;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct octeon_soft_command *sc;
        struct octeon_instr_irh *irh;
        unsigned long flags;

        while (old != iq->octeon_read_index) {
                reqtype = iq->request_list[old].reqtype;
                buf     = iq->request_list[old].buf;

                if (reqtype == REQTYPE_NONE)
                        goto skip_this;

                octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
                                                     &bytes_compl);

                switch (reqtype) {
                case REQTYPE_NORESP_NET:
                case REQTYPE_NORESP_NET_SG:
                case REQTYPE_RESP_NET_SG:
                        reqtype_free_fn[oct->octeon_id][reqtype](buf);
                        break;
                case REQTYPE_RESP_NET:
                case REQTYPE_SOFT_COMMAND:
                        sc = buf;

                        if (OCTEON_CN23XX_PF(oct))
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd3.irh;
                        else
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd2.irh;
                        if (irh->rflag) {
                                /* We're expecting a response from Octeon.
                                 * It's up to lio_process_ordered_list() to
                                 * process sc. Add sc to the ordered soft
                                 * command response list because we expect
                                 * a response from Octeon.
                                 */
                                spin_lock_irqsave
                                        (&oct->response_list
                                         [OCTEON_ORDERED_SC_LIST].lock,
                                         flags);
                                atomic_inc(&oct->response_list
                                           [OCTEON_ORDERED_SC_LIST].
                                           pending_req_count);
                                list_add_tail(&sc->node, &oct->response_list
                                        [OCTEON_ORDERED_SC_LIST].head);
                                spin_unlock_irqrestore
                                        (&oct->response_list
                                         [OCTEON_ORDERED_SC_LIST].lock,
                                         flags);
                        } else {
                                if (sc->callback) {
                                        /* This callback must not sleep */
                                        sc->callback(oct, OCTEON_REQUEST_DONE,
                                                     sc->callback_arg);
                                }
                        }
                        break;
                default:
                        dev_err(&oct->pci_dev->dev,
                                "%s Unknown reqtype: %d buf: %p at idx %d\n",
                                __func__, reqtype, buf, old);
                }

                iq->request_list[old].buf = NULL;
                iq->request_list[old].reqtype = 0;

skip_this:
                inst_count++;
                INCR_INDEX_BY1(old, iq->max_count);

                if ((napi_budget) && (inst_count >= napi_budget))
                        break;
        }
        if (bytes_compl)
                octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
                                                   bytes_compl);
        iq->flush_index = old;

        return inst_count;
}
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
                u32 pending_thresh, u32 napi_budget)
{
        u32 inst_processed = 0;
        u32 tot_inst_processed = 0;
        int tx_done = 1;

        if (!spin_trylock(&iq->iq_flush_running_lock))
                return tx_done;

        spin_lock_bh(&iq->lock);

        iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

        if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
                do {
                        /* Process any outstanding IQ packets. */
                        if (iq->flush_index == iq->octeon_read_index)
                                break;

                        if (napi_budget)
                                inst_processed = lio_process_iq_request_list
                                        (oct, iq,
                                         napi_budget - tot_inst_processed);
                        else
                                inst_processed =
                                        lio_process_iq_request_list(oct, iq, 0);

                        if (inst_processed) {
                                atomic_sub(inst_processed, &iq->instr_pending);
                                iq->stats.instr_processed += inst_processed;
                        }

                        tot_inst_processed += inst_processed;
                        inst_processed = 0;

                } while (tot_inst_processed < napi_budget);

                if (napi_budget && (tot_inst_processed >= napi_budget))
                        tx_done = 0;
        }

        iq->last_db_time = jiffies;

        spin_unlock_bh(&iq->lock);

        spin_unlock(&iq->iq_flush_running_lock);

        return tx_done;
}
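/*
 * Note (added commentary): napi_budget == 0 means "no budget". The
 * do/while condition (tot_inst_processed < 0) is then false after one
 * pass, and that single pass drains every completed command, which is how
 * __check_db_timeout() uses it: octeon_flush_iq(oct, iq, 1, 0). With a
 * nonzero budget the function returns 0 when the budget was exhausted and
 * 1 when all pending work fit under it, so a NAPI poller can decide
 * whether to reschedule.
 */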
/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
        struct octeon_instr_queue *iq;
        u64 next_time;

        if (!oct)
                return;

        iq = oct->instr_queue[iq_no];
        if (!iq)
                return;

        /* return immediately, if no work pending */
        if (!atomic_read(&iq->instr_pending))
                return;
        /* If jiffies - last_db_time < db_timeout do nothing */
        next_time = iq->last_db_time + iq->db_timeout;
        if (!time_after(jiffies, (unsigned long)next_time))
                return;
        iq->last_db_time = jiffies;

        /* Flush the instruction queue */
        octeon_flush_iq(oct, iq, 1, 0);

        lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
        u64 iq_no = wk->ctxul;
        struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
        u32 delay = 10;

        __check_db_timeout(oct, iq_no);
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
                    u32 force_db, void *cmd, void *buf,
                    u32 datasize, u32 reqtype)
{
        struct iq_post_status st;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        /* Get the lock and prevent other tasks and tx interrupt handler from
         * running.
         */
        spin_lock_bh(&iq->post_lock);

        st = __post_command2(iq, cmd);

        if (st.status != IQ_SEND_FAILED) {
                octeon_report_sent_bytes_to_bql(buf, reqtype);
                __add_to_request_list(iq, st.index, buf, reqtype);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

                if (force_db)
                        ring_doorbell(oct, iq);
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
        }

        spin_unlock_bh(&iq->post_lock);

        /* This is only done here to expedite packets being flushed
         * for cases where there are no IQ completion interrupts.
         */
        /*if (iq->do_auto_flush)*/
        /*      octeon_flush_iq(oct, iq, 2, 0);*/

        return st.status;
}
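/*
 * Usage sketch (illustrative, not from this file): a tx path posts a
 * prebuilt 32B/64B command and rings the doorbell in one call; "ndata",
 * drop_and_free() and stop_tx_queue() are placeholder names for whatever
 * the caller tracks per packet:
 *
 *      status = octeon_send_command(oct, ndata->q_no, 1, // 1 = force_db
 *                                   &ndata->cmd, ndata->buf,
 *                                   ndata->datasize, ndata->reqtype);
 *      if (status == IQ_SEND_FAILED)
 *              drop_and_free(ndata->buf);      // command was not queued
 *      else if (status == IQ_SEND_STOP)
 *              stop_tx_queue(...);             // queue is nearly full
 */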
void
octeon_prepare_soft_command(struct octeon_device *oct,
                            struct octeon_soft_command *sc,
                            u8 opcode,
                            u8 subcode,
                            u32 irh_ossp,
                            u64 ossp0,
                            u64 ossp1)
{
        struct octeon_config *oct_cfg;
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        WARN_ON(opcode > 15);
        WARN_ON(subcode > 127);

        oct_cfg = octeon_get_conf(oct);

        if (OCTEON_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

                ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

                pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

                pki_ih3->uqpg =
                        oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;

                pki_ih3->tag     = LIO_CONTROL;
                pki_ih3->tagtype = ATOMIC_TAG;
                pki_ih3->qpg =
                        oct->instr_queue[sc->iq_no]->txpciq.s.qpg;

                if (sc->datasize)
                        ih3->dlengsz = sc->datasize;

                irh            = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                irh->opcode    = opcode;
                irh->subcode   = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp       = irh_ossp;
                sc->cmd.cmd3.ossp[0] = ossp0;
                sc->cmd.cmd3.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen      = sc->rdatasize;

                        irh->rflag = 1;
                        /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
                        ih3->fsz = LIO_SOFTCMDRESP_IH3;
                } else {
                        irh->rflag = 0;
                        /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
                        ih3->fsz = LIO_PCICMD_O3;
                }
        } else {
                ih2          = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                ih2->tagtype = ATOMIC_TAG;
                ih2->tag     = LIO_CONTROL;
                ih2->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);

                if (sc->datasize)
                        ih2->dlengsz = sc->datasize;

                irh            = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                irh->opcode    = opcode;
                irh->subcode   = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp       = irh_ossp;
                sc->cmd.cmd2.ossp[0] = ossp0;
                sc->cmd.cmd2.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen      = sc->rdatasize;

                        irh->rflag = 1;
                        /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
                        ih2->fsz = LIO_SOFTCMDRESP_IH2;
                } else {
                        irh->rflag = 0;
                        /* irh + ossp[0] + ossp[1] = 24 bytes */
                        ih2->fsz = LIO_PCICMD_O2;
                }
        }
}
int octeon_send_soft_command(struct octeon_device *oct,
                             struct octeon_soft_command *sc)
{
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        u32 len;

        if (OCTEON_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                if (ih3->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd3.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd3.rptr = sc->dmarptr;
                }
                len = (u32)ih3->dlengsz;
        } else {
                ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                if (ih2->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd2.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd2.rptr = sc->dmarptr;
                }
                len = (u32)ih2->dlengsz;
        }

        if (sc->wait_time)
                sc->timeout = jiffies + sc->wait_time;

        return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
                                    len, REQTYPE_SOFT_COMMAND));
}
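/*
 * Soft command life cycle (added summary with a hedged sketch; the alloc
 * and free helpers are defined below, the ordered-list handling lives in
 * the response manager):
 *
 *      sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
 *      octeon_prepare_soft_command(oct, sc, opcode, subcode,
 *                                  irh_ossp, ossp0, ossp1);
 *      sc->callback = my_done_cb;      // optional; placeholder name
 *      sc->callback_arg = my_ctx;      // placeholder
 *      sc->wait_time = 1000;           // placeholder; added to jiffies above
 *      if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *              octeon_free_soft_command(oct, sc);
 *
 * When rdatasize is nonzero (irh->rflag set), the retired command is moved
 * to the ordered response list by lio_process_iq_request_list() and
 * completed or timed out by lio_process_ordered_list(); otherwise the
 * callback fires directly when the fetched command is retired.
 */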
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
        struct octeon_soft_command *sc;
        u64 dma_addr;
        int i;

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);
        spin_lock_init(&oct->sc_buf_pool.lock);
        atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

        for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
                sc = (struct octeon_soft_command *)
                        lio_dma_alloc(oct,
                                      SOFT_COMMAND_BUFFER_SIZE,
                                      (dma_addr_t *)&dma_addr);
                if (!sc) {
                        octeon_free_sc_buffer_pool(oct);
                        return 1;
                }

                sc->dma_addr = dma_addr;
                sc->size = SOFT_COMMAND_BUFFER_SIZE;

                list_add_tail(&sc->node, &oct->sc_buf_pool.head);
        }

        return 0;
}
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
        struct list_head *tmp, *tmp2;
        struct octeon_soft_command *sc;

        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
                list_del(tmp);

                sc = (struct octeon_soft_command *)tmp;

                lio_dma_free(oct, sc->size, sc, sc->dma_addr);
        }

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        return 0;
}
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
                                                      u32 datasize,
                                                      u32 rdatasize,
                                                      u32 ctxsize)
{
        u64 dma_addr;
        u32 size;
        u32 offset = sizeof(struct octeon_soft_command);
        struct octeon_soft_command *sc = NULL;
        struct list_head *tmp;

        WARN_ON((offset + datasize + rdatasize + ctxsize) >
                SOFT_COMMAND_BUFFER_SIZE);

        spin_lock_bh(&oct->sc_buf_pool.lock);

        if (list_empty(&oct->sc_buf_pool.head)) {
                spin_unlock_bh(&oct->sc_buf_pool.lock);
                return NULL;
        }

        list_for_each(tmp, &oct->sc_buf_pool.head)
                break;

        list_del(tmp);

        atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        sc = (struct octeon_soft_command *)tmp;

        dma_addr = sc->dma_addr;
        size = sc->size;

        memset(sc, 0, sc->size);

        sc->dma_addr = dma_addr;
        sc->size = size;

        if (ctxsize) {
                sc->ctxptr = (u8 *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;

        if (datasize) {
                sc->virtdptr = (u8 *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }

        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                WARN_ON(rdatasize < 16);
                sc->virtrptr = (u8 *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
        }

        return sc;
}
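/*
 * Layout example (added for clarity): a pool buffer is carved up in place,
 * with each region aligned up to the next 128-byte boundary via
 * (offset + len + 127) & 0xffffff80. Assuming, for illustration, that
 * sizeof(struct octeon_soft_command) == 256, ctxsize == 16, datasize == 64
 * and rdatasize == 16:
 *
 *      offset   0: struct octeon_soft_command itself
 *      offset 256: ctxptr  (16 bytes of caller context)
 *      offset 384: virtdptr/dmadptr (64 bytes of command data)
 *      offset 512: virtrptr/dmarptr (16 bytes of response data);
 *                  status_word is the last 8 bytes of this region.
 *
 * Everything must fit in SOFT_COMMAND_BUFFER_SIZE, which the WARN_ON above
 * checks.
 */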
void octeon_free_soft_command(struct octeon_device *oct,
                              struct octeon_soft_command *sc)
{
        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_add_tail(&sc->node, &oct->sc_buf_pool.head);

        atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);
}