/**********************************************************************
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
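
/* Helper used below to bump a per-queue statistics counter ("field") of
 * instruction queue "iq_no" by "count".
 */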
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)     \
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
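
/* Per-device table of buffer-free callbacks, indexed by request type.
 * Entries are installed via octeon_register_reqtype_free_fn() and invoked
 * from lio_process_iq_request_list() when a posted request is retired.
 */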
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
/* Define this to return the request status compatible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    u32 iq_no, u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 q_size;
	struct cavium_wq *db_wq;

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}
	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->fill_threshold = (u32)conf->db_min;

	iq->host_write_index = 0;
	iq->octeon_read_index = 0;

	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);
	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
	if (!oct->check_db_wq[iq_no].wq) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}
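
/* Tear down an instruction queue created by octeon_init_instr_queue():
 * stop the doorbell-check work, free the request list and the DMA ring.
 */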
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	flush_workqueue(oct->check_db_wq[iq_no].wq);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	if (iq->request_list)
		vfree(iq->request_list);

	q_size = iq->max_count * desc_size;
	lio_dma_free(oct, (u32)q_size, iq->base_addr,
		     iq->base_addr_dma);
	return 0;
}
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    u32 iq_no, u32 num_descs,
		    void *app_ctx)
{
	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}

	oct->instr_queue[iq_no] =
	    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->fn_list.enable_io_queues(oct);
	return 0;
}
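
/* Poll all enabled instruction queues until Octeon has fetched every pending
 * instruction, retrying up to 1000 times with a one-tick sleep between
 * passes.  Returns the number of instructions still pending.
 */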
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
			if (!(oct->io_qmask.iq & (1UL << i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}
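
/* Ring the hardware doorbell for an instruction queue: write the number of
 * commands queued since the last ring (fill_cnt) to the queue's doorbell
 * register and record when the doorbell was last rung.
 */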
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
	}
}
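
/* Copy a command into the next free slot of the instruction queue ring.
 * The slot size depends on whether the queue runs in 64-byte or 32-byte
 * instruction mode.
 */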
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}
static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
	       struct octeon_instr_queue *iq,
	       u32 force_db __attribute__((unused)), u8 *cmd)
{
	u32 index = -1;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
		return -1;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return index;
}
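
/* Variant of __post_command() that reports the outcome in a struct
 * iq_post_status: IQ_SEND_FAILED when the ring is full, IQ_SEND_STOP when
 * this command uses the last-but-one free slot (so the caller should stop
 * queueing), otherwise IQ_SEND_OK together with the slot index used.
 */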
static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
		struct octeon_instr_queue *iq,
		u32 force_db __attribute__((unused)), u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}
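
/* Walk the request list from flush_index up to the index Octeon has read.
 * NORESPONSE network buffers are handed to the registered free callback;
 * soft commands that expect a response are queued on the ordered response
 * list, others get their completion callback invoked here.  Returns the
 * number of entries retired so the caller can adjust instr_pending.
 */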
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			irh = (struct octeon_instr_irh *)&sc->cmd.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_bh(&oct->response_list
					     [OCTEON_ORDERED_SC_LIST].lock);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_bh(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback)
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);
	}

	octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
					   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}
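
/* Refresh the queue's view of how far Octeon has read, retire the
 * corresponding request-list entries, and account for the processed
 * instructions in instr_pending and the queue statistics.
 */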
static inline void
update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	u32 inst_processed = 0;

	/* Calculate how many commands Octeon has read and move the read index
	 * accordingly.
	 */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);

	/* Move the NORESPONSE requests to the per-device completion list. */
	if (iq->flush_index != iq->octeon_read_index)
		inst_processed = lio_process_iq_request_list(oct, iq);

	if (inst_processed) {
		atomic_sub(inst_processed, &iq->instr_pending);
		iq->stats.instr_processed += inst_processed;
	}
}
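
/* Flush an instruction queue if at least pending_thresh instructions are
 * outstanding.  Takes the queue lock in BH context since posting can also
 * happen from tasklet context and from the doorbell-check work.
 */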
static void
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh)
{
	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		spin_lock_bh(&iq->lock);
		update_iq_indices(oct, iq);
		spin_unlock_bh(&iq->lock);
	}
}
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	iq = oct->instr_queue[iq_no];

	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Get the lock and prevent tasklets. This routine gets called from
	 * the poll thread. Instructions can now be posted in tasklet context
	 */
	spin_lock_bh(&iq->lock);
	if (iq->fill_cnt != 0)
		ring_doorbell(oct, iq);

	spin_unlock_bh(&iq->lock);

	/* Flush the instruction queue */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 1);
}
/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	unsigned long iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
}
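
/* Post one command to instruction queue iq_no and remember the associated
 * buffer/reqtype so it can be freed or completed once Octeon fetches it.
 * The doorbell is rung when fill_cnt reaches fill_threshold or when
 * force_db is set; the return value is one of the IQ_SEND_* codes from
 * __post_command2().
 */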
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->lock);

	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);

	return st.status;
}
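
/* Fill in the command header fields (IH, IRH, optional RDP) of a soft
 * command before it is sent.  A typical caller allocates the command from
 * the pool, prepares it and then sends it, roughly (illustrative sketch,
 * not a verbatim copy of any one caller):
 *
 *	sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
 *	octeon_prepare_soft_command(oct, sc, opcode, subcode,
 *				    irh_ossp, ossp0, ossp1);
 *	retval = octeon_send_soft_command(oct, sc);
 */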
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u32 opcode,
			    u32 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	BUG_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	ih->tagtype = ATOMIC_TAG;
	ih->tag = LIO_CONTROL;
	ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	ih->dlengsz = sc->datasize;

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.ossp[0] = ossp0;
	sc->cmd.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}

	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
		sc->iq_no++;
}
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	if (ih->dlengsz) {
		BUG_ON(!sc->dmadptr);
		sc->cmd.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;

		sc->cmd.rptr = sc->dmarptr;
	}

	sc->timeout = jiffies + sc->wait_time;

	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
}
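
/* The driver keeps a pre-allocated pool of DMA-coherent soft-command
 * buffers (MAX_SOFT_COMMAND_BUFFERS entries of SOFT_COMMAND_BUFFER_SIZE
 * bytes each).  octeon_alloc_soft_command()/octeon_free_soft_command()
 * hand buffers out of and back into this pool.
 */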
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc)
			return 1;

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock(&oct->sc_buf_pool.lock);

	return 0;
}
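
/* Take a buffer from the pool and carve it up for one soft command.  The
 * layout inside the buffer is the struct octeon_soft_command itself,
 * followed by the caller context area, then the data and response areas,
 * the latter two starting on 128-byte boundaries; the last 8 bytes of the
 * response area hold the completion status word.  Returns NULL if the
 * pool is empty.
 */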
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	BUG_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		BUG_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);
}