/* drivers/net/ethernet/cavium/liquidio/request_manager.c */
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

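/* Accumulate "count" into a statistics field of the given device's
 * instruction queue.
 */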
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

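/* Result of posting a command: an IQ_SEND_* status and the descriptor
 * ring index the command was written at (-1 on failure).
 */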
struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);

/* Per-device table of buffer-free callbacks, indexed by request type. */
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return a request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    u32 iq_no, u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 q_size;
	struct cavium_wq *db_wq;

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d is not a power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have not yet been fetched by it.
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->iq_no = iq_no;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

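/* Tear down an instruction queue: stop the doorbell worker, free the
 * request list and release the descriptor ring. Returns 0 on success,
 * 1 if the descriptor ring was never allocated.
 */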
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	flush_workqueue(oct->check_db_wq[iq_no].wq);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    u32 iq_no,
		    u32 num_descs,
		    void *app_ctx)
{
	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ %d is in use; not creating it again\n",
			iq_no);
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

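/* Poll until Octeon has fetched all posted instructions, retrying up to
 * 1000 times. Returns the number of instructions still pending, so 0
 * means every queue drained.
 */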
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
			if (!(oct->io_qmask.iq & (1UL << i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

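/* Announce the commands accumulated in fill_cnt by writing that count to
 * the queue's doorbell register; only done while the device is RUNNING.
 */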
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
	}
}

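/* Copy a 32B or 64B command into the ring slot at host_write_index. */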
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

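/* Post one command to the queue. Returns the ring index the command was
 * written at, or -1 if the queue is full.
 */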
static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
	       struct octeon_instr_queue *iq,
	       u32 force_db __attribute__((unused)), u8 *cmd)
{
	u32 index = -1;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon can fetch any
	 * instructions.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
		return -1;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return index;
}

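/* Like __post_command(), but returns an iq_post_status: IQ_SEND_FAILED
 * when the ring is already full, IQ_SEND_STOP when this command makes it
 * full so the caller should stop posting, IQ_SEND_OK otherwise.
 */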
static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
		struct octeon_instr_queue *iq,
		u32 force_db __attribute__((unused)), u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon can fetch any
	 * instructions.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

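/* Register the callback used to free buffers of the given request type
 * once Octeon has fetched the corresponding instructions.
 */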
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

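/* Record the buffer posted at ring slot "idx" so it can be completed
 * once Octeon fetches the instruction.
 */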
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

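/* Walk the request list from flush_index up to the index Octeon has read
 * to, completing each entry according to its request type: network
 * buffers are handed to the registered free handler, soft commands that
 * expect a response are moved to the ordered response list, and the rest
 * get their callback invoked. Returns the number of entries processed.
 */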
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			irh = (struct octeon_instr_irh *)&sc->cmd.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process this sc, so add it to the ordered
				 * soft command response list.
				 */
				spin_lock_bh(&oct->response_list
					     [OCTEON_ORDERED_SC_LIST].lock);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_bh(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback) {
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

static inline void
update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	u32 inst_processed = 0;

	/* Calculate how many commands Octeon has read and move the read index
	 * accordingly.
	 */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);

	/* Move the NORESPONSE requests to the per-device completion list. */
	if (iq->flush_index != iq->octeon_read_index)
		inst_processed = lio_process_iq_request_list(oct, iq);

	if (inst_processed) {
		atomic_sub(inst_processed, &iq->instr_pending);
		iq->stats.instr_processed += inst_processed;
	}
}

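/* Reclaim descriptors for instructions that Octeon has already fetched,
 * but only once at least "pending_thresh" instructions are outstanding.
 */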
static void
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh)
{
	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		spin_lock_bh(&iq->lock);
		update_iq_indices(oct, iq);
		spin_unlock_bh(&iq->lock);
	}
}

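/* If the doorbell has not been rung for db_timeout jiffies and commands
 * are waiting, ring it now, then flush the queue if auto-flush is on.
 */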
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;
	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Take the lock; it also keeps tasklets out. This routine is called
	 * from the poll thread, while instructions may be posted from
	 * tasklet context.
	 */
	spin_lock_bh(&iq->lock);
	if (iq->fill_cnt != 0)
		ring_doorbell(oct, iq);

	spin_unlock_bh(&iq->lock);

	/* Flush the instruction queue */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 1);
}

/* Called by the poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by
 * Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	unsigned long iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
}

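/* Post a single command and its associated buffer to an instruction
 * queue. On success the buffer is recorded in the request list, the
 * statistics are updated, and the doorbell is rung once fill_threshold
 * commands have accumulated (or immediately when force_db is set).
 * Returns the IQ_SEND_* status of the post.
 */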
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->lock);

	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);

	return st.status;
}

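/* Fill in the instruction header, the input request header and, when a
 * response buffer is attached, the response descriptor of a soft command
 * before it is sent to the device.
 */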
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	BUG_ON(opcode > 15);
	BUG_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	ih->tagtype = ATOMIC_TAG;
	ih->tag = LIO_CONTROL;
	ih->raw = 1;
	ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	if (sc->datasize) {
		ih->dlengsz = sc->datasize;
		ih->rs = 1;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.ossp[0] = ossp0;
	sc->cmd.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		irh->len = 4;
		ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		irh->len = 2;
		ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}

	/* Advance iq_no to the first instruction queue that is enabled. */
	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
		sc->iq_no++;
}

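/* Wire up the DMA data and response pointers of a prepared soft command,
 * initialize its completion word and post it to its instruction queue.
 */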
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	if (ih->dlengsz) {
		BUG_ON(!sc->dmadptr);
		sc->cmd.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;

		sc->cmd.rptr = sc->dmarptr;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
}

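/* Preallocate MAX_SOFT_COMMAND_BUFFERS DMA buffers for soft commands and
 * park them on the per-device free list. Returns 0 on success, 1 if an
 * allocation fails.
 */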
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc)
			return 1;

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

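/* Give every buffer on the soft-command free list back to the DMA
 * allocator and reinitialize the list head.
 */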
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock(&oct->sc_buf_pool.lock);

	return 0;
}

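/* Take one buffer from the soft-command pool and lay it out: the
 * octeon_soft_command struct at the base, an optional context area right
 * after it, then data and response areas each starting on a 128-byte
 * boundary, with the 8-byte status word at the end of the response area.
 */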
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	BUG_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock(&oct->sc_buf_pool.lock);
		return NULL;
	}

	/* list_for_each() followed by break grabs the first free entry. */
	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		BUG_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

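/* Return a soft-command buffer to the per-device pool. */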
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);
}