4 * Main module for NetUP Universal Dual DVB-CI
6 * Copyright (C) 2014 NetUP Inc.
7 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
8 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "lnbh25.h"
39 static int spi_enable
;
40 module_param(spi_enable
, int, S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IROTH
);
42 MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
43 MODULE_AUTHOR("info@netup.ru");
44 MODULE_VERSION(NETUP_UNIDVB_VERSION
);
45 MODULE_LICENSE("GPL");
/* Per-instance DVB adapter numbers, overridable via the adapter_nr option */
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes*/
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200
78 * struct netup_dma_regs - the map of DMA module registers
79 * @ctrlstat_set: Control register, write to set control bits
80 * @ctrlstat_clear: Control register, write to clear control bits
81 * @start_addr_lo: DMA ring buffer start address, lower part
82 * @start_addr_hi: DMA ring buffer start address, higher part
83 * @size: DMA ring buffer size register
84 Bits [0-7]: DMA packet size, 188 bytes
85 Bits [16-23]: packets count in block, 128 packets
86 Bits [24-31]: blocks count, 8 blocks
87 * @timeout: DMA timeout in units of 8ns
88 For example, value of 375000000 equals to 3 sec
89 * @curr_addr_lo: Current ring buffer head address, lower part
90 * @curr_addr_hi: Current ring buffer head address, higher part
91 * @stat_pkt_received: Statistic register, not tested
92 * @stat_pkt_accepted: Statistic register, not tested
93 * @stat_pkt_overruns: Statistic register, not tested
94 * @stat_pkt_underruns: Statistic register, not tested
95 * @stat_fifo_overruns: Statistic register, not tested
97 struct netup_dma_regs
{
99 __le32 ctrlstat_clear
;
100 __le32 start_addr_lo
;
101 __le32 start_addr_hi
;
106 __le32 stat_pkt_received
;
107 __le32 stat_pkt_accepted
;
108 __le32 stat_pkt_overruns
;
109 __le32 stat_pkt_underruns
;
110 __le32 stat_fifo_overruns
;
111 } __packed
__aligned(1);
113 struct netup_unidvb_buffer
{
114 struct vb2_v4l2_buffer vb
;
115 struct list_head list
;
/* Forward declarations: used by the frontend config tables below */
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
122 static struct cxd2841er_config demod_config
= {
126 static struct horus3a_config horus3a_conf
= {
129 .set_tuner_callback
= netup_unidvb_tuner_ctrl
132 static struct ascot2e_config ascot2e_conf
= {
134 .set_tuner_callback
= netup_unidvb_tuner_ctrl
137 static struct lnbh25_config lnbh25_conf
= {
139 .data2_config
= LNBH25_TEN
| LNBH25_EXTM
142 static int netup_unidvb_tuner_ctrl(void *priv
, int is_dvb_tc
)
145 struct netup_dma
*dma
= priv
;
146 struct netup_unidvb_dev
*ndev
;
151 dev_dbg(&ndev
->pci_dev
->dev
, "%s(): num %d is_dvb_tc %d\n",
152 __func__
, dma
->num
, is_dvb_tc
);
153 reg
= readb(ndev
->bmmio0
+ GPIO_REG_IO
);
154 mask
= (dma
->num
== 0) ? GPIO_RFA_CTL
: GPIO_RFB_CTL
;
159 writeb(reg
, ndev
->bmmio0
+ GPIO_REG_IO
);
163 static void netup_unidvb_dev_enable(struct netup_unidvb_dev
*ndev
)
167 /* enable PCI-E interrupts */
168 writel(AVL_IRQ_ENABLE
, ndev
->bmmio0
+ AVL_PCIE_IENR
);
169 /* unreset frontends bits[0:1] */
170 writeb(0x00, ndev
->bmmio0
+ GPIO_REG_IO
);
173 GPIO_FEA_RESET
| GPIO_FEB_RESET
|
174 GPIO_FEA_TU_RESET
| GPIO_FEB_TU_RESET
|
175 GPIO_RFA_CTL
| GPIO_RFB_CTL
;
176 writeb(gpio_reg
, ndev
->bmmio0
+ GPIO_REG_IO
);
177 dev_dbg(&ndev
->pci_dev
->dev
,
178 "%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
179 __func__
, readl(ndev
->bmmio0
+ AVL_PCIE_IENR
),
180 (int)readb(ndev
->bmmio0
+ GPIO_REG_IO
));
184 static void netup_unidvb_dma_enable(struct netup_dma
*dma
, int enable
)
186 u32 irq_mask
= (dma
->num
== 0 ?
187 NETUP_UNIDVB_IRQ_DMA1
: NETUP_UNIDVB_IRQ_DMA2
);
189 dev_dbg(&dma
->ndev
->pci_dev
->dev
,
190 "%s(): DMA%d enable %d\n", __func__
, dma
->num
, enable
);
192 writel(BIT_DMA_RUN
, &dma
->regs
->ctrlstat_set
);
193 writew(irq_mask
, dma
->ndev
->bmmio0
+ REG_IMASK_SET
);
195 writel(BIT_DMA_RUN
, &dma
->regs
->ctrlstat_clear
);
196 writew(irq_mask
, dma
->ndev
->bmmio0
+ REG_IMASK_CLEAR
);
200 static irqreturn_t
netup_dma_interrupt(struct netup_dma
*dma
)
205 struct device
*dev
= &dma
->ndev
->pci_dev
->dev
;
207 spin_lock_irqsave(&dma
->lock
, flags
);
208 addr_curr
= ((u64
)readl(&dma
->regs
->curr_addr_hi
) << 32) |
209 (u64
)readl(&dma
->regs
->curr_addr_lo
) | dma
->high_addr
;
211 writel(BIT_DMA_IRQ
, &dma
->regs
->ctrlstat_clear
);
213 if (addr_curr
< dma
->addr_phys
||
214 addr_curr
> dma
->addr_phys
+ dma
->ring_buffer_size
) {
215 if (addr_curr
!= 0) {
217 "%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
218 __func__
, addr_curr
, (u64
)dma
->addr_phys
,
219 (u64
)(dma
->addr_phys
+ dma
->ring_buffer_size
));
223 size
= (addr_curr
>= dma
->addr_last
) ?
224 (u32
)(addr_curr
- dma
->addr_last
) :
225 (u32
)(dma
->ring_buffer_size
- (dma
->addr_last
- addr_curr
));
226 if (dma
->data_size
!= 0) {
227 printk_ratelimited("%s(): lost interrupt, data size %d\n",
228 __func__
, dma
->data_size
);
229 dma
->data_size
+= size
;
231 if (dma
->data_size
== 0 || dma
->data_size
> dma
->ring_buffer_size
) {
232 dma
->data_size
= size
;
233 dma
->data_offset
= (u32
)(dma
->addr_last
- dma
->addr_phys
);
235 dma
->addr_last
= addr_curr
;
236 queue_work(dma
->ndev
->wq
, &dma
->work
);
238 spin_unlock_irqrestore(&dma
->lock
, flags
);
242 static irqreturn_t
netup_unidvb_isr(int irq
, void *dev_id
)
244 struct pci_dev
*pci_dev
= (struct pci_dev
*)dev_id
;
245 struct netup_unidvb_dev
*ndev
= pci_get_drvdata(pci_dev
);
247 irqreturn_t iret
= IRQ_NONE
;
249 /* disable interrupts */
250 writel(0, ndev
->bmmio0
+ AVL_PCIE_IENR
);
251 /* check IRQ source */
252 reg40
= readl(ndev
->bmmio0
+ AVL_PCIE_ISR
);
253 if ((reg40
& AVL_IRQ_ASSERTED
) != 0) {
254 /* IRQ is being signaled */
255 reg_isr
= readw(ndev
->bmmio0
+ REG_ISR
);
256 if (reg_isr
& NETUP_UNIDVB_IRQ_I2C0
) {
257 iret
= netup_i2c_interrupt(&ndev
->i2c
[0]);
258 } else if (reg_isr
& NETUP_UNIDVB_IRQ_I2C1
) {
259 iret
= netup_i2c_interrupt(&ndev
->i2c
[1]);
260 } else if (reg_isr
& NETUP_UNIDVB_IRQ_SPI
) {
261 iret
= netup_spi_interrupt(ndev
->spi
);
262 } else if (reg_isr
& NETUP_UNIDVB_IRQ_DMA1
) {
263 iret
= netup_dma_interrupt(&ndev
->dma
[0]);
264 } else if (reg_isr
& NETUP_UNIDVB_IRQ_DMA2
) {
265 iret
= netup_dma_interrupt(&ndev
->dma
[1]);
266 } else if (reg_isr
& NETUP_UNIDVB_IRQ_CI
) {
267 iret
= netup_ci_interrupt(ndev
);
269 dev_err(&pci_dev
->dev
,
270 "%s(): unknown interrupt 0x%x\n",
274 /* re-enable interrupts */
275 writel(AVL_IRQ_ENABLE
, ndev
->bmmio0
+ AVL_PCIE_IENR
);
279 static int netup_unidvb_queue_setup(struct vb2_queue
*vq
,
280 unsigned int *nbuffers
,
281 unsigned int *nplanes
,
282 unsigned int sizes
[],
285 struct netup_dma
*dma
= vb2_get_drv_priv(vq
);
287 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s()\n", __func__
);
290 if (vq
->num_buffers
+ *nbuffers
< VIDEO_MAX_FRAME
)
291 *nbuffers
= VIDEO_MAX_FRAME
- vq
->num_buffers
;
292 sizes
[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT
* 188);
293 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s() nbuffers=%d sizes[0]=%d\n",
294 __func__
, *nbuffers
, sizes
[0]);
298 static int netup_unidvb_buf_prepare(struct vb2_buffer
*vb
)
300 struct netup_dma
*dma
= vb2_get_drv_priv(vb
->vb2_queue
);
301 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
302 struct netup_unidvb_buffer
*buf
= container_of(vbuf
,
303 struct netup_unidvb_buffer
, vb
);
305 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s(): buf 0x%p\n", __func__
, buf
);
310 static void netup_unidvb_buf_queue(struct vb2_buffer
*vb
)
313 struct netup_dma
*dma
= vb2_get_drv_priv(vb
->vb2_queue
);
314 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
315 struct netup_unidvb_buffer
*buf
= container_of(vbuf
,
316 struct netup_unidvb_buffer
, vb
);
318 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s(): %p\n", __func__
, buf
);
319 spin_lock_irqsave(&dma
->lock
, flags
);
320 list_add_tail(&buf
->list
, &dma
->free_buffers
);
321 spin_unlock_irqrestore(&dma
->lock
, flags
);
322 mod_timer(&dma
->timeout
, jiffies
+ msecs_to_jiffies(1000));
325 static int netup_unidvb_start_streaming(struct vb2_queue
*q
, unsigned int count
)
327 struct netup_dma
*dma
= vb2_get_drv_priv(q
);
329 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s()\n", __func__
);
330 netup_unidvb_dma_enable(dma
, 1);
334 static void netup_unidvb_stop_streaming(struct vb2_queue
*q
)
336 struct netup_dma
*dma
= vb2_get_drv_priv(q
);
338 dev_dbg(&dma
->ndev
->pci_dev
->dev
, "%s()\n", __func__
);
339 netup_unidvb_dma_enable(dma
, 0);
340 netup_unidvb_queue_cleanup(dma
);
343 static struct vb2_ops dvb_qops
= {
344 .queue_setup
= netup_unidvb_queue_setup
,
345 .buf_prepare
= netup_unidvb_buf_prepare
,
346 .buf_queue
= netup_unidvb_buf_queue
,
347 .start_streaming
= netup_unidvb_start_streaming
,
348 .stop_streaming
= netup_unidvb_stop_streaming
,
351 static int netup_unidvb_queue_init(struct netup_dma
*dma
,
352 struct vb2_queue
*vb_queue
)
356 /* Init videobuf2 queue structure */
357 vb_queue
->type
= V4L2_BUF_TYPE_VIDEO_CAPTURE
;
358 vb_queue
->io_modes
= VB2_MMAP
| VB2_USERPTR
| VB2_READ
;
359 vb_queue
->drv_priv
= dma
;
360 vb_queue
->buf_struct_size
= sizeof(struct netup_unidvb_buffer
);
361 vb_queue
->ops
= &dvb_qops
;
362 vb_queue
->mem_ops
= &vb2_vmalloc_memops
;
363 vb_queue
->timestamp_flags
= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
;
364 res
= vb2_queue_init(vb_queue
);
366 dev_err(&dma
->ndev
->pci_dev
->dev
,
367 "%s(): vb2_queue_init failed (%d)\n", __func__
, res
);
372 static int netup_unidvb_dvb_init(struct netup_unidvb_dev
*ndev
,
375 struct vb2_dvb_frontend
*fe0
, *fe1
, *fe2
;
377 if (num
< 0 || num
> 1) {
378 dev_dbg(&ndev
->pci_dev
->dev
,
379 "%s(): unable to init DVB bus %d\n", __func__
, num
);
382 mutex_init(&ndev
->frontends
[num
].lock
);
383 INIT_LIST_HEAD(&ndev
->frontends
[num
].felist
);
384 if (vb2_dvb_alloc_frontend(&ndev
->frontends
[num
], 1) == NULL
||
385 vb2_dvb_alloc_frontend(
386 &ndev
->frontends
[num
], 2) == NULL
||
387 vb2_dvb_alloc_frontend(
388 &ndev
->frontends
[num
], 3) == NULL
) {
389 dev_dbg(&ndev
->pci_dev
->dev
,
390 "%s(): unable to allocate vb2_dvb_frontend\n",
394 fe0
= vb2_dvb_get_frontend(&ndev
->frontends
[num
], 1);
395 fe1
= vb2_dvb_get_frontend(&ndev
->frontends
[num
], 2);
396 fe2
= vb2_dvb_get_frontend(&ndev
->frontends
[num
], 3);
397 if (fe0
== NULL
|| fe1
== NULL
|| fe2
== NULL
) {
398 dev_dbg(&ndev
->pci_dev
->dev
,
399 "%s(): frontends has not been allocated\n", __func__
);
402 netup_unidvb_queue_init(&ndev
->dma
[num
], &fe0
->dvb
.dvbq
);
403 netup_unidvb_queue_init(&ndev
->dma
[num
], &fe1
->dvb
.dvbq
);
404 netup_unidvb_queue_init(&ndev
->dma
[num
], &fe2
->dvb
.dvbq
);
405 fe0
->dvb
.name
= "netup_fe0";
406 fe1
->dvb
.name
= "netup_fe1";
407 fe2
->dvb
.name
= "netup_fe2";
408 fe0
->dvb
.frontend
= dvb_attach(cxd2841er_attach_s
,
409 &demod_config
, &ndev
->i2c
[num
].adap
);
410 if (fe0
->dvb
.frontend
== NULL
) {
411 dev_dbg(&ndev
->pci_dev
->dev
,
412 "%s(): unable to attach DVB-S/S2 frontend\n",
414 goto frontend_detach
;
416 horus3a_conf
.set_tuner_priv
= &ndev
->dma
[num
];
417 if (!dvb_attach(horus3a_attach
, fe0
->dvb
.frontend
,
418 &horus3a_conf
, &ndev
->i2c
[num
].adap
)) {
419 dev_dbg(&ndev
->pci_dev
->dev
,
420 "%s(): unable to attach DVB-S/S2 tuner frontend\n",
422 goto frontend_detach
;
424 if (!dvb_attach(lnbh25_attach
, fe0
->dvb
.frontend
,
425 &lnbh25_conf
, &ndev
->i2c
[num
].adap
)) {
426 dev_dbg(&ndev
->pci_dev
->dev
,
427 "%s(): unable to attach SEC frontend\n", __func__
);
428 goto frontend_detach
;
430 /* DVB-T/T2 frontend */
431 fe1
->dvb
.frontend
= dvb_attach(cxd2841er_attach_t
,
432 &demod_config
, &ndev
->i2c
[num
].adap
);
433 if (fe1
->dvb
.frontend
== NULL
) {
434 dev_dbg(&ndev
->pci_dev
->dev
,
435 "%s(): unable to attach DVB-T frontend\n", __func__
);
436 goto frontend_detach
;
438 fe1
->dvb
.frontend
->id
= 1;
439 ascot2e_conf
.set_tuner_priv
= &ndev
->dma
[num
];
440 if (!dvb_attach(ascot2e_attach
, fe1
->dvb
.frontend
,
441 &ascot2e_conf
, &ndev
->i2c
[num
].adap
)) {
442 dev_dbg(&ndev
->pci_dev
->dev
,
443 "%s(): unable to attach DVB-T tuner frontend\n",
445 goto frontend_detach
;
447 /* DVB-C/C2 frontend */
448 fe2
->dvb
.frontend
= dvb_attach(cxd2841er_attach_c
,
449 &demod_config
, &ndev
->i2c
[num
].adap
);
450 if (fe2
->dvb
.frontend
== NULL
) {
451 dev_dbg(&ndev
->pci_dev
->dev
,
452 "%s(): unable to attach DVB-C frontend\n", __func__
);
453 goto frontend_detach
;
455 fe2
->dvb
.frontend
->id
= 2;
456 if (!dvb_attach(ascot2e_attach
, fe2
->dvb
.frontend
,
457 &ascot2e_conf
, &ndev
->i2c
[num
].adap
)) {
458 dev_dbg(&ndev
->pci_dev
->dev
,
459 "%s(): unable to attach DVB-T/C tuner frontend\n",
461 goto frontend_detach
;
464 if (vb2_dvb_register_bus(&ndev
->frontends
[num
],
466 &ndev
->pci_dev
->dev
, adapter_nr
, 1)) {
467 dev_dbg(&ndev
->pci_dev
->dev
,
468 "%s(): unable to register DVB bus %d\n",
470 goto frontend_detach
;
472 dev_info(&ndev
->pci_dev
->dev
, "DVB init done, num=%d\n", num
);
475 vb2_dvb_dealloc_frontends(&ndev
->frontends
[num
]);
479 static void netup_unidvb_dvb_fini(struct netup_unidvb_dev
*ndev
, int num
)
481 if (num
< 0 || num
> 1) {
482 dev_err(&ndev
->pci_dev
->dev
,
483 "%s(): unable to unregister DVB bus %d\n",
487 vb2_dvb_unregister_bus(&ndev
->frontends
[num
]);
488 dev_info(&ndev
->pci_dev
->dev
,
489 "%s(): DVB bus %d unregistered\n", __func__
, num
);
/*
 * netup_unidvb_dvb_setup - register both DVB buses
 *
 * On failure of the second bus the first one is torn down so the caller
 * never sees a half-initialized state. Returns 0 or a negative errno.
 */
static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}
507 static int netup_unidvb_ring_copy(struct netup_dma
*dma
,
508 struct netup_unidvb_buffer
*buf
)
510 u32 copy_bytes
, ring_bytes
;
511 u32 buff_bytes
= NETUP_DMA_PACKETS_COUNT
* 188 - buf
->size
;
512 u8
*p
= vb2_plane_vaddr(&buf
->vb
.vb2_buf
, 0);
513 struct netup_unidvb_dev
*ndev
= dma
->ndev
;
516 dev_err(&ndev
->pci_dev
->dev
,
517 "%s(): buffer is NULL\n", __func__
);
521 if (dma
->data_offset
+ dma
->data_size
> dma
->ring_buffer_size
) {
522 ring_bytes
= dma
->ring_buffer_size
- dma
->data_offset
;
523 copy_bytes
= (ring_bytes
> buff_bytes
) ?
524 buff_bytes
: ring_bytes
;
525 memcpy_fromio(p
, (u8 __iomem
*)(dma
->addr_virt
+ dma
->data_offset
), copy_bytes
);
527 buf
->size
+= copy_bytes
;
528 buff_bytes
-= copy_bytes
;
529 dma
->data_size
-= copy_bytes
;
530 dma
->data_offset
+= copy_bytes
;
531 if (dma
->data_offset
== dma
->ring_buffer_size
)
532 dma
->data_offset
= 0;
534 if (buff_bytes
> 0) {
535 ring_bytes
= dma
->data_size
;
536 copy_bytes
= (ring_bytes
> buff_bytes
) ?
537 buff_bytes
: ring_bytes
;
538 memcpy_fromio(p
, (u8 __iomem
*)(dma
->addr_virt
+ dma
->data_offset
), copy_bytes
);
539 buf
->size
+= copy_bytes
;
540 dma
->data_size
-= copy_bytes
;
541 dma
->data_offset
+= copy_bytes
;
542 if (dma
->data_offset
== dma
->ring_buffer_size
)
543 dma
->data_offset
= 0;
548 static void netup_unidvb_dma_worker(struct work_struct
*work
)
550 struct netup_dma
*dma
= container_of(work
, struct netup_dma
, work
);
551 struct netup_unidvb_dev
*ndev
= dma
->ndev
;
552 struct netup_unidvb_buffer
*buf
;
555 spin_lock_irqsave(&dma
->lock
, flags
);
556 if (dma
->data_size
== 0) {
557 dev_dbg(&ndev
->pci_dev
->dev
,
558 "%s(): data_size == 0\n", __func__
);
561 while (dma
->data_size
> 0) {
562 if (list_empty(&dma
->free_buffers
)) {
563 dev_dbg(&ndev
->pci_dev
->dev
,
564 "%s(): no free buffers\n", __func__
);
567 buf
= list_first_entry(&dma
->free_buffers
,
568 struct netup_unidvb_buffer
, list
);
569 if (buf
->size
>= NETUP_DMA_PACKETS_COUNT
* 188) {
570 dev_dbg(&ndev
->pci_dev
->dev
,
571 "%s(): buffer overflow, size %d\n",
572 __func__
, buf
->size
);
575 if (netup_unidvb_ring_copy(dma
, buf
))
577 if (buf
->size
== NETUP_DMA_PACKETS_COUNT
* 188) {
578 list_del(&buf
->list
);
579 dev_dbg(&ndev
->pci_dev
->dev
,
580 "%s(): buffer %p done, size %d\n",
581 __func__
, buf
, buf
->size
);
582 buf
->vb
.vb2_buf
.timestamp
= ktime_get_ns();
583 vb2_set_plane_payload(&buf
->vb
.vb2_buf
, 0, buf
->size
);
584 vb2_buffer_done(&buf
->vb
.vb2_buf
, VB2_BUF_STATE_DONE
);
589 spin_unlock_irqrestore(&dma
->lock
, flags
);
592 static void netup_unidvb_queue_cleanup(struct netup_dma
*dma
)
594 struct netup_unidvb_buffer
*buf
;
597 spin_lock_irqsave(&dma
->lock
, flags
);
598 while (!list_empty(&dma
->free_buffers
)) {
599 buf
= list_first_entry(&dma
->free_buffers
,
600 struct netup_unidvb_buffer
, list
);
601 list_del(&buf
->list
);
602 vb2_buffer_done(&buf
->vb
.vb2_buf
, VB2_BUF_STATE_ERROR
);
604 spin_unlock_irqrestore(&dma
->lock
, flags
);
607 static void netup_unidvb_dma_timeout(unsigned long data
)
609 struct netup_dma
*dma
= (struct netup_dma
*)data
;
610 struct netup_unidvb_dev
*ndev
= dma
->ndev
;
612 dev_dbg(&ndev
->pci_dev
->dev
, "%s()\n", __func__
);
613 netup_unidvb_queue_cleanup(dma
);
616 static int netup_unidvb_dma_init(struct netup_unidvb_dev
*ndev
, int num
)
618 struct netup_dma
*dma
;
619 struct device
*dev
= &ndev
->pci_dev
->dev
;
621 if (num
< 0 || num
> 1) {
622 dev_err(dev
, "%s(): unable to register DMA%d\n",
626 dma
= &ndev
->dma
[num
];
627 dev_info(dev
, "%s(): starting DMA%d\n", __func__
, num
);
630 spin_lock_init(&dma
->lock
);
631 INIT_WORK(&dma
->work
, netup_unidvb_dma_worker
);
632 INIT_LIST_HEAD(&dma
->free_buffers
);
633 dma
->timeout
.function
= netup_unidvb_dma_timeout
;
634 dma
->timeout
.data
= (unsigned long)dma
;
635 init_timer(&dma
->timeout
);
636 dma
->ring_buffer_size
= ndev
->dma_size
/ 2;
637 dma
->addr_virt
= ndev
->dma_virt
+ dma
->ring_buffer_size
* num
;
638 dma
->addr_phys
= (dma_addr_t
)((u64
)ndev
->dma_phys
+
639 dma
->ring_buffer_size
* num
);
640 dev_info(dev
, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
641 __func__
, num
, dma
->addr_virt
,
642 (unsigned long long)dma
->addr_phys
,
643 dma
->ring_buffer_size
);
644 memset_io((u8 __iomem
*)dma
->addr_virt
, 0, dma
->ring_buffer_size
);
645 dma
->addr_last
= dma
->addr_phys
;
646 dma
->high_addr
= (u32
)(dma
->addr_phys
& 0xC0000000);
647 dma
->regs
= (struct netup_dma_regs __iomem
*)(num
== 0 ?
648 ndev
->bmmio0
+ NETUP_DMA0_ADDR
:
649 ndev
->bmmio0
+ NETUP_DMA1_ADDR
);
650 writel((NETUP_DMA_BLOCKS_COUNT
<< 24) |
651 (NETUP_DMA_PACKETS_COUNT
<< 8) | 188, &dma
->regs
->size
);
652 writel((u32
)(dma
->addr_phys
& 0x3FFFFFFF), &dma
->regs
->start_addr_lo
);
653 writel(0, &dma
->regs
->start_addr_hi
);
654 writel(dma
->high_addr
, ndev
->bmmio0
+ 0x1000);
655 writel(375000000, &dma
->regs
->timeout
);
657 writel(BIT_DMA_IRQ
, &dma
->regs
->ctrlstat_clear
);
661 static void netup_unidvb_dma_fini(struct netup_unidvb_dev
*ndev
, int num
)
663 struct netup_dma
*dma
;
665 if (num
< 0 || num
> 1)
667 dev_dbg(&ndev
->pci_dev
->dev
, "%s(): num %d\n", __func__
, num
);
668 dma
= &ndev
->dma
[num
];
669 netup_unidvb_dma_enable(dma
, 0);
671 cancel_work_sync(&dma
->work
);
672 del_timer(&dma
->timeout
);
675 static int netup_unidvb_dma_setup(struct netup_unidvb_dev
*ndev
)
679 res
= netup_unidvb_dma_init(ndev
, 0);
682 res
= netup_unidvb_dma_init(ndev
, 1);
684 netup_unidvb_dma_fini(ndev
, 0);
687 netup_unidvb_dma_enable(&ndev
->dma
[0], 0);
688 netup_unidvb_dma_enable(&ndev
->dma
[1], 0);
692 static int netup_unidvb_ci_setup(struct netup_unidvb_dev
*ndev
,
693 struct pci_dev
*pci_dev
)
697 writew(NETUP_UNIDVB_IRQ_CI
, ndev
->bmmio0
+ REG_IMASK_SET
);
698 res
= netup_unidvb_ci_register(ndev
, 0, pci_dev
);
701 res
= netup_unidvb_ci_register(ndev
, 1, pci_dev
);
703 netup_unidvb_ci_unregister(ndev
, 0);
707 static int netup_unidvb_request_mmio(struct pci_dev
*pci_dev
)
709 if (!request_mem_region(pci_resource_start(pci_dev
, 0),
710 pci_resource_len(pci_dev
, 0), NETUP_UNIDVB_NAME
)) {
711 dev_err(&pci_dev
->dev
,
712 "%s(): unable to request MMIO bar 0 at 0x%llx\n",
714 (unsigned long long)pci_resource_start(pci_dev
, 0));
717 if (!request_mem_region(pci_resource_start(pci_dev
, 1),
718 pci_resource_len(pci_dev
, 1), NETUP_UNIDVB_NAME
)) {
719 dev_err(&pci_dev
->dev
,
720 "%s(): unable to request MMIO bar 1 at 0x%llx\n",
722 (unsigned long long)pci_resource_start(pci_dev
, 1));
723 release_mem_region(pci_resource_start(pci_dev
, 0),
724 pci_resource_len(pci_dev
, 0));
730 static int netup_unidvb_request_modules(struct device
*dev
)
732 static const char * const modules
[] = {
733 "lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
735 const char * const *curr_mod
= modules
;
738 while (*curr_mod
!= NULL
) {
739 err
= request_module(*curr_mod
);
741 dev_warn(dev
, "request_module(%s) failed: %d\n",
749 static int netup_unidvb_initdev(struct pci_dev
*pci_dev
,
750 const struct pci_device_id
*pci_id
)
754 struct netup_unidvb_dev
*ndev
;
755 int old_firmware
= 0;
757 netup_unidvb_request_modules(&pci_dev
->dev
);
759 /* Check card revision */
760 if (pci_dev
->revision
!= NETUP_PCI_DEV_REVISION
) {
761 dev_err(&pci_dev
->dev
,
762 "netup_unidvb: expected card revision %d, got %d\n",
763 NETUP_PCI_DEV_REVISION
, pci_dev
->revision
);
764 dev_err(&pci_dev
->dev
,
765 "Please upgrade firmware!\n");
766 dev_err(&pci_dev
->dev
,
767 "Instructions on http://www.netup.tv\n");
772 /* allocate device context */
773 ndev
= kzalloc(sizeof(*ndev
), GFP_KERNEL
);
777 memset(ndev
, 0, sizeof(*ndev
));
778 ndev
->old_fw
= old_firmware
;
779 ndev
->wq
= create_singlethread_workqueue(NETUP_UNIDVB_NAME
);
781 dev_err(&pci_dev
->dev
,
782 "%s(): unable to create workqueue\n", __func__
);
785 ndev
->pci_dev
= pci_dev
;
786 ndev
->pci_bus
= pci_dev
->bus
->number
;
787 ndev
->pci_slot
= PCI_SLOT(pci_dev
->devfn
);
788 ndev
->pci_func
= PCI_FUNC(pci_dev
->devfn
);
789 ndev
->board_num
= ndev
->pci_bus
*10 + ndev
->pci_slot
;
790 pci_set_drvdata(pci_dev
, ndev
);
792 dev_info(&pci_dev
->dev
, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
793 __func__
, ndev
->board_num
, ndev
->pci_bus
, ndev
->pci_slot
);
795 if (pci_enable_device(pci_dev
)) {
796 dev_err(&pci_dev
->dev
, "%s(): pci_enable_device failed\n",
801 pci_read_config_byte(pci_dev
, PCI_CLASS_REVISION
, &board_revision
);
802 pci_read_config_word(pci_dev
, PCI_VENDOR_ID
, &board_vendor
);
803 if (board_vendor
!= NETUP_VENDOR_ID
) {
804 dev_err(&pci_dev
->dev
, "%s(): unknown board vendor 0x%x",
805 __func__
, board_vendor
);
808 dev_info(&pci_dev
->dev
,
809 "%s(): board vendor 0x%x, revision 0x%x\n",
810 __func__
, board_vendor
, board_revision
);
811 pci_set_master(pci_dev
);
812 if (pci_set_dma_mask(pci_dev
, 0xffffffff) < 0) {
813 dev_err(&pci_dev
->dev
,
814 "%s(): 32bit PCI DMA is not supported\n", __func__
);
817 dev_info(&pci_dev
->dev
, "%s(): using 32bit PCI DMA\n", __func__
);
818 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
819 pcie_capability_clear_and_set_word(pci_dev
, PCI_EXP_DEVCTL
,
820 PCI_EXP_DEVCTL_READRQ
| PCI_EXP_DEVCTL_RELAX_EN
|
821 PCI_EXP_DEVCTL_NOSNOOP_EN
, 0);
822 /* Adjust PCIe completion timeout. */
823 pcie_capability_clear_and_set_word(pci_dev
,
824 PCI_EXP_DEVCTL2
, 0xf, 0x2);
826 if (netup_unidvb_request_mmio(pci_dev
)) {
827 dev_err(&pci_dev
->dev
,
828 "%s(): unable to request MMIO regions\n", __func__
);
831 ndev
->lmmio0
= ioremap(pci_resource_start(pci_dev
, 0),
832 pci_resource_len(pci_dev
, 0));
834 dev_err(&pci_dev
->dev
,
835 "%s(): unable to remap MMIO bar 0\n", __func__
);
838 ndev
->lmmio1
= ioremap(pci_resource_start(pci_dev
, 1),
839 pci_resource_len(pci_dev
, 1));
841 dev_err(&pci_dev
->dev
,
842 "%s(): unable to remap MMIO bar 1\n", __func__
);
845 ndev
->bmmio0
= (u8 __iomem
*)ndev
->lmmio0
;
846 ndev
->bmmio1
= (u8 __iomem
*)ndev
->lmmio1
;
847 dev_info(&pci_dev
->dev
,
848 "%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
850 ndev
->lmmio0
, (u32
)pci_resource_len(pci_dev
, 0),
851 ndev
->lmmio1
, (u32
)pci_resource_len(pci_dev
, 1),
853 if (request_irq(pci_dev
->irq
, netup_unidvb_isr
, IRQF_SHARED
,
854 "netup_unidvb", pci_dev
) < 0) {
855 dev_err(&pci_dev
->dev
,
856 "%s(): can't get IRQ %d\n", __func__
, pci_dev
->irq
);
857 goto irq_request_err
;
859 ndev
->dma_size
= 2 * 188 *
860 NETUP_DMA_BLOCKS_COUNT
* NETUP_DMA_PACKETS_COUNT
;
861 ndev
->dma_virt
= dma_alloc_coherent(&pci_dev
->dev
,
862 ndev
->dma_size
, &ndev
->dma_phys
, GFP_KERNEL
);
863 if (!ndev
->dma_virt
) {
864 dev_err(&pci_dev
->dev
, "%s(): unable to allocate DMA buffer\n",
868 netup_unidvb_dev_enable(ndev
);
869 if (spi_enable
&& netup_spi_init(ndev
)) {
870 dev_warn(&pci_dev
->dev
,
871 "netup_unidvb: SPI flash setup failed\n");
875 dev_err(&pci_dev
->dev
,
876 "netup_unidvb: card initialization was incomplete\n");
879 if (netup_i2c_register(ndev
)) {
880 dev_err(&pci_dev
->dev
, "netup_unidvb: I2C setup failed\n");
883 /* enable I2C IRQs */
884 writew(NETUP_UNIDVB_IRQ_I2C0
| NETUP_UNIDVB_IRQ_I2C1
,
885 ndev
->bmmio0
+ REG_IMASK_SET
);
886 usleep_range(5000, 10000);
887 if (netup_unidvb_dvb_setup(ndev
)) {
888 dev_err(&pci_dev
->dev
, "netup_unidvb: DVB setup failed\n");
891 if (netup_unidvb_ci_setup(ndev
, pci_dev
)) {
892 dev_err(&pci_dev
->dev
, "netup_unidvb: CI setup failed\n");
895 if (netup_unidvb_dma_setup(ndev
)) {
896 dev_err(&pci_dev
->dev
, "netup_unidvb: DMA setup failed\n");
899 dev_info(&pci_dev
->dev
,
900 "netup_unidvb: device has been initialized\n");
903 netup_unidvb_ci_unregister(ndev
, 0);
904 netup_unidvb_ci_unregister(ndev
, 1);
906 netup_unidvb_dvb_fini(ndev
, 0);
907 netup_unidvb_dvb_fini(ndev
, 1);
909 netup_i2c_unregister(ndev
);
912 netup_spi_release(ndev
);
914 dma_free_coherent(&pci_dev
->dev
, ndev
->dma_size
,
915 ndev
->dma_virt
, ndev
->dma_phys
);
917 free_irq(pci_dev
->irq
, pci_dev
);
919 iounmap(ndev
->lmmio1
);
921 iounmap(ndev
->lmmio0
);
923 release_mem_region(pci_resource_start(pci_dev
, 0),
924 pci_resource_len(pci_dev
, 0));
925 release_mem_region(pci_resource_start(pci_dev
, 1),
926 pci_resource_len(pci_dev
, 1));
928 pci_disable_device(pci_dev
);
930 pci_set_drvdata(pci_dev
, NULL
);
931 destroy_workqueue(ndev
->wq
);
935 dev_err(&pci_dev
->dev
,
936 "%s(): failed to initizalize device\n", __func__
);
940 static void netup_unidvb_finidev(struct pci_dev
*pci_dev
)
942 struct netup_unidvb_dev
*ndev
= pci_get_drvdata(pci_dev
);
944 dev_info(&pci_dev
->dev
, "%s(): trying to stop device\n", __func__
);
946 netup_unidvb_dma_fini(ndev
, 0);
947 netup_unidvb_dma_fini(ndev
, 1);
948 netup_unidvb_ci_unregister(ndev
, 0);
949 netup_unidvb_ci_unregister(ndev
, 1);
950 netup_unidvb_dvb_fini(ndev
, 0);
951 netup_unidvb_dvb_fini(ndev
, 1);
952 netup_i2c_unregister(ndev
);
955 netup_spi_release(ndev
);
956 writew(0xffff, ndev
->bmmio0
+ REG_IMASK_CLEAR
);
957 dma_free_coherent(&ndev
->pci_dev
->dev
, ndev
->dma_size
,
958 ndev
->dma_virt
, ndev
->dma_phys
);
959 free_irq(pci_dev
->irq
, pci_dev
);
960 iounmap(ndev
->lmmio0
);
961 iounmap(ndev
->lmmio1
);
962 release_mem_region(pci_resource_start(pci_dev
, 0),
963 pci_resource_len(pci_dev
, 0));
964 release_mem_region(pci_resource_start(pci_dev
, 1),
965 pci_resource_len(pci_dev
, 1));
966 pci_disable_device(pci_dev
);
967 pci_set_drvdata(pci_dev
, NULL
);
968 destroy_workqueue(ndev
->wq
);
970 dev_info(&pci_dev
->dev
,
971 "%s(): device has been successfully stopped\n", __func__
);
975 static struct pci_device_id netup_unidvb_pci_tbl
[] = {
976 { PCI_DEVICE(0x1b55, 0x18f6) },
979 MODULE_DEVICE_TABLE(pci
, netup_unidvb_pci_tbl
);
981 static struct pci_driver netup_unidvb_pci_driver
= {
982 .name
= "netup_unidvb",
983 .id_table
= netup_unidvb_pci_tbl
,
984 .probe
= netup_unidvb_initdev
,
985 .remove
= netup_unidvb_finidev
,
990 static int __init
netup_unidvb_init(void)
992 return pci_register_driver(&netup_unidvb_pci_driver
);
995 static void __exit
netup_unidvb_fini(void)
997 pci_unregister_driver(&netup_unidvb_pci_driver
);
module_init(netup_unidvb_init);
module_exit(netup_unidvb_fini);