/* drivers/net/ethernet/cavium/liquidio/request_manager.c */
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have not yet been fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

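	/* A note on the ring bookkeeping initialized above (a summary of how
	 * the rest of this file uses these fields, not new behaviour):
	 *   host_write_index  - next descriptor slot the host will fill
	 *   octeon_read_index - fetch position last reported by the chip
	 *   flush_index       - next posted entry whose completion has not
	 *                       yet been processed by the host
	 *   instr_pending     - count of posted-but-not-yet-processed entries
	 * All indices wrap modulo max_count.
	 */
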
	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			pending =
			     atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
	       struct octeon_instr_queue *iq,
	       u32 force_db __attribute__((unused)), u8 *cmd)
{
	u32 index = -1;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
		return -1;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return index;
}

static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
		struct octeon_instr_queue *iq,
		u32 force_db __attribute__((unused)), u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}
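
/* Worked example of the two thresholds above (illustrative numbers only):
 * with max_count == 64, a post attempted while 62 (max_count - 2) instructions
 * are already pending still succeeds but returns IQ_SEND_STOP so the caller
 * can throttle further submissions; once 63 (max_count - 1) are pending the
 * post is refused with IQ_SEND_FAILED, which keeps host_write_index from
 * wrapping onto entries the chip has not fetched yet.
 */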

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}
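
/* Sketch of how a caller is expected to use the registration above
 * (my_free_netbuf is a hypothetical callback name, shown for illustration):
 *
 *	static void my_free_netbuf(void *buf)
 *	{
 *		dev_kfree_skb_any((struct sk_buff *)buf);
 *	}
 *	...
 *	octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET,
 *					my_free_netbuf);
 *
 * lio_process_iq_request_list() below then invokes the registered function
 * once the firmware has fetched a NORESP instruction of that type.
 */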

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_bh(&oct->response_list
					     [OCTEON_ORDERED_SC_LIST].lock);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_bh(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback) {
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

static inline void
update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	u32 inst_processed = 0;

	/* Calculate how many commands Octeon has read and move the read index
	 * accordingly.
	 */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);

	/* Move the NORESPONSE requests to the per-device completion list. */
	if (iq->flush_index != iq->octeon_read_index)
		inst_processed = lio_process_iq_request_list(oct, iq);

	if (inst_processed) {
		atomic_sub(inst_processed, &iq->instr_pending);
		iq->stats.instr_processed += inst_processed;
	}
}

static void
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh)
{
	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		spin_lock_bh(&iq->lock);
		update_iq_indices(oct, iq);
		spin_unlock_bh(&iq->lock);
	}
}
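
/* Note on pending_thresh as used in this file: the doorbell-timeout path
 * (__check_db_timeout) flushes with a threshold of 1, i.e. whenever anything
 * is pending, while the send path (octeon_send_command) passes 2, which skips
 * the flush when only the instruction just posted is outstanding.
 */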

static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;
	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Take the lock and disable bottom halves: this routine is called
	 * from the poll thread, while instructions may be posted from
	 * tasklet context.
	 */
	spin_lock_bh(&iq->lock);
	if (iq->fill_cnt != 0)
		ring_doorbell(oct, iq);

	spin_unlock_bh(&iq->lock);

	/* Flush the instruction queue */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 1);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	unsigned long iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
}

int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->lock);

	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);

	return st.status;
}
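
/* The return value is the iq_post_status status: IQ_SEND_OK or IQ_SEND_STOP
 * when the command was queued (IQ_SEND_STOP asks the caller to throttle
 * further submissions), or IQ_SEND_FAILED when the queue was full and the
 * command was dropped; in the failed case the caller still owns buf.
 */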

void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	BUG_ON(opcode > 15);
	BUG_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	ih2->tagtype = ATOMIC_TAG;
	ih2->tag = LIO_CONTROL;
	ih2->raw = 1;
	ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	if (sc->datasize) {
		ih2->dlengsz = sc->datasize;
		ih2->rs = 1;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.cmd2.ossp[0] = ossp0;
	sc->cmd.cmd2.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}
}
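
/* The fsz arithmetic above, spelled out (sizes as implied by the comments in
 * this function): each of irh, ossp[0] and ossp[1] is 8 bytes, giving
 * 3 * 8 = 24 bytes of front data when no response is expected; with a
 * response, rdp (8 bytes) and rptr (8 bytes) are appended, so 5 * 8 = 40.
 */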

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;
	u32 len;

	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	if (ih2->dlengsz) {
		WARN_ON(!sc->dmadptr);
		sc->cmd.cmd2.dptr = sc->dmadptr;
	}
	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;

		sc->cmd.cmd2.rptr = sc->dmarptr;
	}
	len = (u32)ih2->dlengsz;

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}
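
/* Typical calling sequence for a soft command, pieced together from the
 * helpers in this file (MY_OPCODE, MY_SUBCODE and my_done_cb are placeholder
 * names, not real firmware opcodes or driver symbols):
 *
 *	struct octeon_soft_command *sc;
 *
 *	sc = octeon_alloc_soft_command(oct, data_len, rdata_len, ctx_len);
 *	octeon_prepare_soft_command(oct, sc, MY_OPCODE, MY_SUBCODE,
 *				    irh_ossp, ossp0, ossp1);
 *	sc->callback = my_done_cb;     (my_done_cb: hypothetical handler)
 *	sc->callback_arg = sc;
 *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *		octeon_free_soft_command(oct, sc);
 */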

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc)
			return 1;

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock(&oct->sc_buf_pool.lock);

	return 0;
}

struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	BUG_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;
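	/* (i.e. round offset up to the next multiple of 128; for example an
	 * offset + ctxsize of 200 becomes (200 + 127) & ~0x7f = 256)
	 */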

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		BUG_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}
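
/* Resulting layout inside one SOFT_COMMAND_BUFFER_SIZE pool buffer
 * (illustrative; actual offsets depend on ctxsize and datasize):
 *
 *	+--------------------------------+  sc (also the DMA base)
 *	| struct octeon_soft_command     |
 *	+--------------------------------+  sc + sizeof(*sc)
 *	| ctx area (ctxsize bytes)       |
 *	+--------------------------------+  rounded up to 128
 *	| data area (datasize bytes)     |
 *	+--------------------------------+  rounded up to 128
 *	| rdata area (rdatasize bytes),  |
 *	| last 8 bytes = status_word     |
 *	+--------------------------------+
 */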

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);
}