PLIP driver: convert the semaphore killed_timer_sem to completion
[deliverable/linux.git] / arch / powerpc / platforms / pasemi / dma_lib.c
CommitLineData
8ee9d857
OJ
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Common functions for DMA access on PA Semi PWRficient
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/of.h>
24
25#include <asm/pasemi_dma.h>
26
/* Hardware limits on the number of TX/RX DMA channels */
#define MAX_TXCH 64
#define MAX_RXCH 64

/* Shared DMA status page, mapped from the IOB device in pasemi_dma_init() */
static struct pasdma_status *dma_status;

/* Register windows mapped at init time via map_onedev() */
static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

/* Hardware IRQ number of the DMA controller; channel IRQs are offsets from it */
static int base_hw_irq;

/* Channel counts read from the DMA controller capability registers */
static int num_txch, num_rxch;

/* The DMA controller PCI device; also serves as the "already initialized" flag */
static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
46
47/* pasemi_read_iob_reg - read IOB register
48 * @reg: Register to read (offset into PCI CFG space)
49 */
50unsigned int pasemi_read_iob_reg(unsigned int reg)
51{
52 return in_le32(iob_regs+reg);
53}
54EXPORT_SYMBOL(pasemi_read_iob_reg);
55
56/* pasemi_write_iob_reg - write IOB register
57 * @reg: Register to write to (offset into PCI CFG space)
58 * @val: Value to write
59 */
60void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
61{
62 out_le32(iob_regs+reg, val);
63}
64EXPORT_SYMBOL(pasemi_write_iob_reg);
65
66/* pasemi_read_mac_reg - read MAC register
67 * @intf: MAC interface
68 * @reg: Register to read (offset into PCI CFG space)
69 */
70unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
71{
72 return in_le32(mac_regs[intf]+reg);
73}
74EXPORT_SYMBOL(pasemi_read_mac_reg);
75
76/* pasemi_write_mac_reg - write MAC register
77 * @intf: MAC interface
78 * @reg: Register to write to (offset into PCI CFG space)
79 * @val: Value to write
80 */
81void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
82{
83 out_le32(mac_regs[intf]+reg, val);
84}
85EXPORT_SYMBOL(pasemi_write_mac_reg);
86
87/* pasemi_read_dma_reg - read DMA register
88 * @reg: Register to read (offset into PCI CFG space)
89 */
90unsigned int pasemi_read_dma_reg(unsigned int reg)
91{
92 return in_le32(dma_regs+reg);
93}
94EXPORT_SYMBOL(pasemi_read_dma_reg);
95
96/* pasemi_write_dma_reg - write DMA register
97 * @reg: Register to write to (offset into PCI CFG space)
98 * @val: Value to write
99 */
100void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
101{
102 out_le32(dma_regs+reg, val);
103}
104EXPORT_SYMBOL(pasemi_write_dma_reg);
105
106static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
107{
108 int bit;
109 int start, limit;
110
111 switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
112 case TXCHAN_EVT0:
113 start = 0;
114 limit = 10;
115 break;
116 case TXCHAN_EVT1:
117 start = 10;
118 limit = MAX_TXCH;
119 break;
120 default:
121 start = 0;
122 limit = MAX_TXCH;
123 break;
124 }
125retry:
126 bit = find_next_bit(txch_free, MAX_TXCH, start);
127 if (bit >= limit)
128 return -ENOSPC;
129 if (!test_and_clear_bit(bit, txch_free))
130 goto retry;
131
132 return bit;
133}
134
135static void pasemi_free_tx_chan(int chan)
136{
137 BUG_ON(test_bit(chan, txch_free));
138 set_bit(chan, txch_free);
139}
140
141static int pasemi_alloc_rx_chan(void)
142{
143 int bit;
144retry:
145 bit = find_first_bit(rxch_free, MAX_RXCH);
146 if (bit >= MAX_TXCH)
147 return -ENOSPC;
148 if (!test_and_clear_bit(bit, rxch_free))
149 goto retry;
150
151 return bit;
152}
153
154static void pasemi_free_rx_chan(int chan)
155{
156 BUG_ON(test_bit(chan, rxch_free));
157 set_bit(chan, rxch_free);
158}
159
160/* pasemi_dma_alloc_chan - Allocate a DMA channel
161 * @type: Type of channel to allocate
162 * @total_size: Total size of structure to allocate (to allow for more
163 * room behind the structure to be used by the client)
164 * @offset: Offset in bytes from start of the total structure to the beginning
165 * of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
166 * not the first member of the client structure.
167 *
168 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
169 * type argument specifies whether it's a RX or TX channel, and in the case
170 * of TX channels which group it needs to belong to (if any).
171 *
172 * Returns a pointer to the total structure allocated on success, NULL
173 * on failure.
174 */
175void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
176 int total_size, int offset)
177{
178 void *buf;
179 struct pasemi_dmachan *chan;
180 int chno;
181
182 BUG_ON(total_size < sizeof(struct pasemi_dmachan));
183
184 buf = kzalloc(total_size, GFP_KERNEL);
185
186 if (!buf)
187 return NULL;
188 chan = buf + offset;
189
190 chan->priv = buf;
191
192 switch (type & (TXCHAN|RXCHAN)) {
193 case RXCHAN:
194 chno = pasemi_alloc_rx_chan();
195 chan->chno = chno;
196 chan->irq = irq_create_mapping(NULL,
197 base_hw_irq + num_txch + chno);
198 chan->status = &dma_status->rx_sta[chno];
199 break;
200 case TXCHAN:
201 chno = pasemi_alloc_tx_chan(type);
202 chan->chno = chno;
203 chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
204 chan->status = &dma_status->tx_sta[chno];
205 break;
206 }
207
208 chan->chan_type = type;
209
210 return chan;
211}
212EXPORT_SYMBOL(pasemi_dma_alloc_chan);
213
214/* pasemi_dma_free_chan - Free a previously allocated channel
215 * @chan: Channel to free
216 *
217 * Frees a previously allocated channel. It will also deallocate any
218 * descriptor ring associated with the channel, if allocated.
219 */
220void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
221{
222 if (chan->ring_virt)
223 pasemi_dma_free_ring(chan);
224
225 switch (chan->chan_type & (RXCHAN|TXCHAN)) {
226 case RXCHAN:
227 pasemi_free_rx_chan(chan->chno);
228 break;
229 case TXCHAN:
230 pasemi_free_tx_chan(chan->chno);
231 break;
232 }
233
234 kfree(chan->priv);
235}
236EXPORT_SYMBOL(pasemi_dma_free_chan);
237
238/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
239 * @chan: Channel for which to allocate
240 * @ring_size: Ring size in 64-bit (8-byte) words
241 *
242 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
243 * on failure. The passed in struct pasemi_dmachan is updated with the
244 * virtual and DMA addresses of the ring.
245 */
246int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
247{
248 BUG_ON(chan->ring_virt);
249
250 chan->ring_size = ring_size;
251
252 chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
253 ring_size * sizeof(u64),
254 &chan->ring_dma, GFP_KERNEL);
255
256 if (!chan->ring_virt)
257 return -ENOMEM;
258
259 memset(chan->ring_virt, 0, ring_size * sizeof(u64));
260
261 return 0;
262}
263EXPORT_SYMBOL(pasemi_dma_alloc_ring);
264
265/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
266 * @chan: Channel for which to free the descriptor ring
267 *
268 * Frees a previously allocated descriptor ring for a channel.
269 */
270void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
271{
272 BUG_ON(!chan->ring_virt);
273
274 dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
275 chan->ring_virt, chan->ring_dma);
276 chan->ring_virt = NULL;
277 chan->ring_size = 0;
278 chan->ring_dma = 0;
279}
280EXPORT_SYMBOL(pasemi_dma_free_ring);
281
282/* pasemi_dma_start_chan - Start a DMA channel
283 * @chan: Channel to start
284 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
285 *
286 * Enables (starts) a DMA channel with optional additional arguments.
287 */
288void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
289{
290 if (chan->chan_type == RXCHAN)
291 pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
292 cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
293 else
294 pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
295 cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
296}
297EXPORT_SYMBOL(pasemi_dma_start_chan);
298
299/* pasemi_dma_stop_chan - Stop a DMA channel
300 * @chan: Channel to stop
301 *
302 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
303 * CMDSTA register and waiting on the ACT (active) bit to clear, then
304 * finally disabling the whole channel.
305 *
306 * This function will only try for a short while for the channel to stop, if
307 * it doesn't it will return failure.
308 *
309 * Returns 1 on success, 0 on failure.
310 */
311#define MAX_RETRIES 5000
312int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
313{
314 int reg, retries;
315 u32 sta;
316
317 if (chan->chan_type == RXCHAN) {
318 reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
319 pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
320 for (retries = 0; retries < MAX_RETRIES; retries++) {
321 sta = pasemi_read_dma_reg(reg);
322 if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
323 pasemi_write_dma_reg(reg, 0);
324 return 1;
325 }
326 cond_resched();
327 }
328 } else {
329 reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
330 pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
331 for (retries = 0; retries < MAX_RETRIES; retries++) {
332 sta = pasemi_read_dma_reg(reg);
333 if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
334 pasemi_write_dma_reg(reg, 0);
335 return 1;
336 }
337 cond_resched();
338 }
339 }
340
341 return 0;
342}
343EXPORT_SYMBOL(pasemi_dma_stop_chan);
344
345/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
346 * @chan: Channel to allocate for
347 * @size: Size of buffer in bytes
348 * @handle: DMA handle
349 *
350 * Allocate a buffer to be used by the DMA engine for read/write,
351 * similar to dma_alloc_coherent().
352 *
353 * Returns the virtual address of the buffer, or NULL in case of failure.
354 */
355void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
356 dma_addr_t *handle)
357{
358 return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
359}
360EXPORT_SYMBOL(pasemi_dma_alloc_buf);
361
/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 *
 * NOTE(review): the dma_free_coherent() call looks wrong. Its signature is
 * (dev, size, cpu_addr, dma_handle), but here @handle (a dma_addr_t *) is
 * passed as the CPU virtual address and GFP_KERNEL as the dma_handle. The
 * interface has no virtual-address parameter, so this cannot free the
 * buffer correctly as written; fixing it would require an interface change.
 * Confirm against callers before relying on this function.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
375
376static void *map_onedev(struct pci_dev *p, int index)
377{
378 struct device_node *dn;
379 void __iomem *ret;
380
381 dn = pci_device_to_OF_node(p);
382 if (!dn)
383 goto fallback;
384
385 ret = of_iomap(dn, index);
386 if (!ret)
387 goto fallback;
388
389 return ret;
390fallback:
391 /* This is hardcoded and ugly, but we have some firmware versions
392 * that don't provide the register space in the device tree. Luckily
393 * they are at well-known locations so we can just do the math here.
394 */
395 return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
396}
397
398/* pasemi_dma_init - Initialize the PA Semi DMA library
399 *
400 * This function initializes the DMA library. It must be called before
401 * any other function in the library.
402 *
403 * Returns 0 on success, errno on failure.
404 */
405int pasemi_dma_init(void)
406{
407 static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
408 struct pci_dev *iob_pdev;
409 struct pci_dev *pdev;
410 struct resource res;
411 struct device_node *dn;
412 int i, intf, err = 0;
413 u32 tmp;
414
415 if (!machine_is(pasemi))
416 return -ENODEV;
417
418 spin_lock(&init_lock);
419
420 /* Make sure we haven't already initialized */
421 if (dma_pdev)
422 goto out;
423
424 iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
425 if (!iob_pdev) {
426 BUG();
427 printk(KERN_WARNING "Can't find I/O Bridge\n");
428 err = -ENODEV;
429 goto out;
430 }
431 iob_regs = map_onedev(iob_pdev, 0);
432
433 dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
434 if (!dma_pdev) {
435 BUG();
436 printk(KERN_WARNING "Can't find DMA controller\n");
437 err = -ENODEV;
438 goto out;
439 }
440 dma_regs = map_onedev(dma_pdev, 0);
441 base_hw_irq = virq_to_hw(dma_pdev->irq);
442
443 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
444 num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
445
446 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
447 num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;
448
449 intf = 0;
450 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
451 pdev;
452 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
453 mac_regs[intf++] = map_onedev(pdev, 0);
454
455 pci_dev_put(pdev);
456
457 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
458 pdev;
459 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
460 mac_regs[intf++] = map_onedev(pdev, 0);
461
462 pci_dev_put(pdev);
463
464 dn = pci_device_to_OF_node(iob_pdev);
465 if (dn)
466 err = of_address_to_resource(dn, 1, &res);
467 if (!dn || err) {
468 /* Fallback for old firmware */
469 res.start = 0xfd800000;
470 res.end = res.start + 0x1000;
471 }
472 dma_status = __ioremap(res.start, res.end-res.start, 0);
473 pci_dev_put(iob_pdev);
474
475 for (i = 0; i < MAX_TXCH; i++)
476 __set_bit(i, txch_free);
477
478 for (i = 0; i < MAX_RXCH; i++)
479 __set_bit(i, rxch_free);
480
481 printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
482 "(%d tx, %d rx channels)\n", num_txch, num_rxch);
483
484out:
485 spin_unlock(&init_lock);
486 return err;
487}
This page took 0.046766 seconds and 5 git commands to generate.