/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
		(struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

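/* octeon_init_instr_queue():
 * Allocates the DMA command ring and the shadow request list for the
 * instruction queue named by txpciq.s.q_no (NUMA-local when possible),
 * programs the queue registers through the chip-specific fn_list hooks,
 * and starts a delayed work item that flushes the queue if commands are
 * still pending once db_timeout has elapsed since the last doorbell.
 */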
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d is not a power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

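/* octeon_delete_instr_queue():
 * Tears down what octeon_init_instr_queue() set up: cancels and destroys
 * the doorbell-check workqueue, frees the shadow request list, and releases
 * the DMA command ring. Returns 0 if the ring was freed, 1 otherwise.
 */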
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
			CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

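/* octeon_setup_iq():
 * Allocates the octeon_instr_queue structure for the queue selected by
 * txpciq.s.q_no (NUMA-local when possible) and initializes it through
 * octeon_init_instr_queue(). If the queue already exists, only the txpciq
 * mapping and the application context are refreshed.
 *
 * A minimal caller sketch (illustrative only, not code from this driver;
 * q_no, num_descs and ctx are placeholders supplied by the caller):
 *
 *	union oct_txpciq txpciq;
 *
 *	txpciq.u64 = 0;
 *	txpciq.s.q_no = q_no;
 *	if (octeon_setup_iq(oct, ifidx, q_no, txpciq, num_descs, ctx))
 *		return -ENOMEM;
 */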
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
		vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
			vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

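/* lio_wait_for_instr_fetch():
 * Polls every active instruction queue (up to 1000 passes, sleeping a jiffy
 * between passes) until Octeon has fetched all pending commands, nudging
 * stalled queues via __check_db_timeout(). Returns the number of commands
 * still pending when it gives up, i.e. 0 on success.
 */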
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			pending =
				atomic_read(&oct->
					    instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
	       struct octeon_instr_queue *iq,
	       u32 force_db __attribute__((unused)), u8 *cmd)
{
	u32 index = -1;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
		return -1;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return index;
}

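/* __post_command2():
 * Copies one 32B/64B command into the ring at host_write_index and returns
 * the slot index together with a status: IQ_SEND_OK normally, IQ_SEND_STOP
 * when this post consumes the last usable slot (the caller should stop
 * submitting), or IQ_SEND_FAILED with index -1 when no slot was available.
 */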
static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
		struct octeon_instr_queue *iq,
		u32 force_db __attribute__((unused)), u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

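/* octeon_register_reqtype_free_fn():
 * Registers the per-request-type release callback that
 * lio_process_iq_request_list() invokes for buffers of that reqtype once
 * Octeon has fetched the corresponding command. Returns -EINVAL for a
 * reqtype above REQTYPE_LAST.
 */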
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

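/* lio_process_iq_request_list():
 * Walks the shadow request list from flush_index up to octeon_read_index,
 * releasing network buffers through the registered reqtype callbacks and
 * either completing soft commands or moving response-bearing ones onto the
 * ordered response list. Stops early once napi_budget entries (if non-zero)
 * have been handled and returns the number of entries processed. Runs under
 * iq->lock when invoked from octeon_flush_iq().
 */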
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_bh(&oct->response_list
					     [OCTEON_ORDERED_SC_LIST].lock);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_bh(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback) {
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

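/* octeon_flush_iq():
 * Refreshes octeon_read_index from hardware and, if at least pending_thresh
 * commands are outstanding, reclaims fetched entries in napi_budget-sized
 * batches via lio_process_iq_request_list(). Returns 1 when the queue was
 * drained within the budget (or another flush was already running), 0 when
 * the NAPI budget was exhausted first.
 */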
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;
	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if no work is pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	unsigned long iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
}

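/* octeon_send_command():
 * Posts one prebuilt 32B/64B command to queue iq_no under iq->post_lock,
 * records buf/reqtype in the shadow request list for later completion,
 * updates the bytes_sent/instr_posted stats, and rings the doorbell when
 * fill_threshold is reached or force_db is set. Returns the post status
 * (IQ_SEND_OK, IQ_SEND_STOP or IQ_SEND_FAILED).
 *
 * Illustrative use (a sketch, not code from this driver; cmd, skb and
 * datasize are caller-supplied): a transmit path would build its command,
 * then do
 *
 *	status = octeon_send_command(oct, iq_no, 0, &cmd, skb,
 *				     datasize, REQTYPE_NORESP_NET);
 *	if (status == IQ_SEND_FAILED)
 *		drop or requeue the packet;
 *	else if (status == IQ_SEND_STOP)
 *		stop the tx queue until space frees up;
 */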
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */
	/*if (iq->do_auto_flush)*/
	/*	octeon_flush_iq(oct, iq, 2, 0);*/

	return st.status;
}

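/* octeon_prepare_soft_command():
 * Fills in the command words of a soft command: the ih2 header (tag, group,
 * and data length when sc->datasize is set), the irh opcode/subcode and
 * opcode-specific parameters, and, when a response buffer is attached
 * (sc->rdatasize), the rdp descriptor with rflag set and a 40-byte
 * front-data size instead of 24.
 */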
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	BUG_ON(opcode > 15);
	BUG_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	ih2->tagtype = ATOMIC_TAG;
	ih2->tag = LIO_CONTROL;
	ih2->raw = 1;
	ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	if (sc->datasize) {
		ih2->dlengsz = sc->datasize;
		ih2->rs = 1;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.cmd2.ossp[0] = ossp0;
	sc->cmd.cmd2.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}
}

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;
	u32 len;

	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	if (ih2->dlengsz) {
		WARN_ON(!sc->dmadptr);
		sc->cmd.cmd2.dptr = sc->dmadptr;
	}
	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;

		sc->cmd.cmd2.rptr = sc->dmarptr;
	}
	len = (u32)ih2->dlengsz;

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

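/* octeon_setup_sc_buffer_pool():
 * Pre-allocates MAX_SOFT_COMMAND_BUFFERS DMA buffers of
 * SOFT_COMMAND_BUFFER_SIZE bytes and links them on sc_buf_pool so that
 * octeon_alloc_soft_command() never has to allocate in the hot path.
 * Returns 1 if any allocation fails.
 */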
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc)
			return 1;

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock(&oct->sc_buf_pool.lock);

	return 0;
}

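/* octeon_alloc_soft_command():
 * Takes one buffer from sc_buf_pool and carves it up: the
 * octeon_soft_command header first, then an optional context region of
 * ctxsize bytes, then the data and response regions, each starting on the
 * next 128-byte boundary. The last 8 bytes of the response region serve as
 * the completion status word. Returns NULL if the pool is empty.
 */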
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	BUG_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		BUG_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);
}