drivers/vme/bridges/vme_tsi148.c
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37
38 static int __init tsi148_init(void);
39 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
40 static void tsi148_remove(struct pci_dev *);
41 static void __exit tsi148_exit(void);
42
43
44 /* Module parameter */
45 static bool err_chk;
46 static int geoid;
47
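/*
 * Usage note (added commentary, not part of the original source): err_chk and
 * geoid are module parameters, assumed to be registered with module_param()
 * further down the file (outside this excerpt). A typical load would be:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=4
 *
 * err_chk turns on the slower VME bus error checking paths in the master
 * read/write routines below; geoid overrides the geographical address rather
 * than reading it from the backplane.
 */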
48 static const char driver_name[] = "vme_tsi148";
49
50 static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
51 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
52 { },
53 };
54
55 static struct pci_driver tsi148_driver = {
56 .name = driver_name,
57 .id_table = tsi148_ids,
58 .probe = tsi148_probe,
59 .remove = tsi148_remove,
60 };
61
62 static void reg_join(unsigned int high, unsigned int low,
63 unsigned long long *variable)
64 {
65 *variable = (unsigned long long)high << 32;
66 *variable |= (unsigned long long)low;
67 }
68
69 static void reg_split(unsigned long long variable, unsigned int *high,
70 unsigned int *low)
71 {
72 *low = (unsigned int)variable & 0xFFFFFFFF;
73 *high = (unsigned int)(variable >> 32);
74 }
75
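/*
 * Example (added for illustration, not part of the original driver): the two
 * helpers above simply join/split the 64-bit values held in the upper/lower
 * register pairs used throughout the TSI148 LCSR space.
 *
 *	unsigned long long addr = 0x0000000180000000ULL, out;
 *	unsigned int hi, lo;
 *
 *	reg_split(addr, &hi, &lo);	// hi = 0x00000001, lo = 0x80000000
 *	reg_join(hi, lo, &out);		// out == addr again
 */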
76 /*
77 * Wakes up DMA queue.
78 */
79 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
80 int channel_mask)
81 {
82 u32 serviced = 0;
83
84 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
85 wake_up(&bridge->dma_queue[0]);
86 serviced |= TSI148_LCSR_INTC_DMA0C;
87 }
88 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
89 wake_up(&bridge->dma_queue[1]);
90 serviced |= TSI148_LCSR_INTC_DMA1C;
91 }
92
93 return serviced;
94 }
95
96 /*
97  * Call the attached location monitor callbacks for any triggered monitors.
98 */
99 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
100 {
101 int i;
102 u32 serviced = 0;
103
104 for (i = 0; i < 4; i++) {
105 if (stat & TSI148_LCSR_INTS_LMS[i]) {
106 /* We only enable interrupts if the callback is set */
107 bridge->lm_callback[i](i);
108 serviced |= TSI148_LCSR_INTC_LMC[i];
109 }
110 }
111
112 return serviced;
113 }
114
115 /*
116  * Handle mailbox interrupts: read and report the received mailbox values.
117 *
118  * XXX This functionality is not exposed up through the API.
119 */
120 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
121 {
122 int i;
123 u32 val;
124 u32 serviced = 0;
125 struct tsi148_driver *bridge;
126
127 bridge = tsi148_bridge->driver_priv;
128
129 for (i = 0; i < 4; i++) {
130 if (stat & TSI148_LCSR_INTS_MBS[i]) {
131 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
132 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
133 ": 0x%x\n", i, val);
134 serviced |= TSI148_LCSR_INTC_MBC[i];
135 }
136 }
137
138 return serviced;
139 }
140
141 /*
142 * Display error & status message when PERR (PCI) exception interrupt occurs.
143 */
144 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
145 {
146 struct tsi148_driver *bridge;
147
148 bridge = tsi148_bridge->driver_priv;
149
150 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
151 "attributes: %08x\n",
152 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
153 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
154 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
155
156 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
157 "completion reg: %08x\n",
158 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
159 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
160
161 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
162
163 return TSI148_LCSR_INTC_PERRC;
164 }
165
166 /*
167 * Save address and status when VME error interrupt occurs.
168 */
169 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
170 {
171 unsigned int error_addr_high, error_addr_low;
172 unsigned long long error_addr;
173 u32 error_attrib;
174 struct vme_bus_error *error;
175 struct tsi148_driver *bridge;
176
177 bridge = tsi148_bridge->driver_priv;
178
179 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
180 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
181 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
182
183 reg_join(error_addr_high, error_addr_low, &error_addr);
184
185 /* Check for exception register overflow (we have lost error data) */
186 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
187 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
188 "Occurred\n");
189 }
190
191 error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
192 if (error) {
193 error->address = error_addr;
194 error->attributes = error_attrib;
195 list_add_tail(&error->list, &tsi148_bridge->vme_errors);
196 } else {
197 dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
198 "VMEbus Error reporting\n");
199 dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
200 "0x%llx, attributes: %08x\n", error_addr, error_attrib);
201 }
202
203 /* Clear Status */
204 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
205
206 return TSI148_LCSR_INTC_VERRC;
207 }
208
209 /*
210 * Wake up IACK queue.
211 */
212 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
213 {
214 wake_up(&bridge->iack_queue);
215
216 return TSI148_LCSR_INTC_IACKC;
217 }
218
219 /*
220  * Call the VME bus interrupt callback(s) if provided.
221 */
222 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
223 u32 stat)
224 {
225 int vec, i, serviced = 0;
226 struct tsi148_driver *bridge;
227
228 bridge = tsi148_bridge->driver_priv;
229
230 for (i = 7; i > 0; i--) {
231 if (stat & (1 << i)) {
232 /*
233 * Note: Even though the registers are defined as
234 * 32-bits in the spec, we only want to issue 8-bit
235 * IACK cycles on the bus, read from offset 3.
236 */
237 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
238
239 vme_irq_handler(tsi148_bridge, i, vec);
240
241 serviced |= (1 << i);
242 }
243 }
244
245 return serviced;
246 }
247
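/*
 * Example (illustrative only, assuming the bridge-independent VME core API of
 * this era): device drivers never call tsi148_VIRQ_irqhandler() directly.
 * They register a handler with the core, which is then invoked via
 * vme_irq_handler() above with the 8-bit vector read during the IACK cycle:
 *
 *	static void my_isr(int level, int statid, void *priv)
 *	{
 *		// statid is the vector read in the IACK cycle
 *	}
 *
 *	// in the device driver's probe, vdev being its struct vme_dev
 *	err = vme_irq_request(vdev, 3, 0x20, my_isr, NULL);
 */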
248 /*
249  * Top level interrupt handler. Calls the appropriate sub handler(s) and then
250  * clears the serviced interrupt status bits.
251 */
252 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
253 {
254 u32 stat, enable, serviced = 0;
255 struct vme_bridge *tsi148_bridge;
256 struct tsi148_driver *bridge;
257
258 tsi148_bridge = ptr;
259
260 bridge = tsi148_bridge->driver_priv;
261
262 /* Determine which interrupts are unmasked and set */
263 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
264 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
265
266 /* Only look at unmasked interrupts */
267 stat &= enable;
268
269 if (unlikely(!stat))
270 return IRQ_NONE;
271
272 /* Call subhandlers as appropriate */
273 /* DMA irqs */
274 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
275 serviced |= tsi148_DMA_irqhandler(bridge, stat);
276
277 /* Location monitor irqs */
278 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
279 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
280 serviced |= tsi148_LM_irqhandler(bridge, stat);
281
282 /* Mail box irqs */
283 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
284 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
285 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
286
287 /* PCI bus error */
288 if (stat & TSI148_LCSR_INTS_PERRS)
289 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
290
291 /* VME bus error */
292 if (stat & TSI148_LCSR_INTS_VERRS)
293 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
294
295 /* IACK irq */
296 if (stat & TSI148_LCSR_INTS_IACKS)
297 serviced |= tsi148_IACK_irqhandler(bridge);
298
299 /* VME bus irqs */
300 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
301 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
302 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
303 TSI148_LCSR_INTS_IRQ1S))
304 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
305
306 /* Clear serviced interrupts */
307 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
308
309 return IRQ_HANDLED;
310 }
311
312 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
313 {
314 int result;
315 unsigned int tmp;
316 struct pci_dev *pdev;
317 struct tsi148_driver *bridge;
318
319 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
320
321 bridge = tsi148_bridge->driver_priv;
322
323 /* Initialise list for VME bus errors */
324 INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
325
326 mutex_init(&tsi148_bridge->irq_mtx);
327
328 result = request_irq(pdev->irq,
329 tsi148_irqhandler,
330 IRQF_SHARED,
331 driver_name, tsi148_bridge);
332 if (result) {
333 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
334 "vector %02X\n", pdev->irq);
335 return result;
336 }
337
338 /* Enable and unmask interrupts */
339 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
340 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
341 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
342 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
343 TSI148_LCSR_INTEO_IACKEO;
344
345 /* This leaves the following interrupts masked.
346 * TSI148_LCSR_INTEO_VIEEO
347 * TSI148_LCSR_INTEO_SYSFLEO
348 * TSI148_LCSR_INTEO_ACFLEO
349 */
350
351 /* Don't enable Location Monitor interrupts here - they will be
352 * enabled when the location monitors are properly configured and
353 * a callback has been attached.
354 * TSI148_LCSR_INTEO_LM0EO
355 * TSI148_LCSR_INTEO_LM1EO
356 * TSI148_LCSR_INTEO_LM2EO
357 * TSI148_LCSR_INTEO_LM3EO
358 */
359
360 /* Don't enable VME interrupts until we add a handler, else the board
361 * will respond to it and we don't want that unless it knows how to
362 * properly deal with it.
363 * TSI148_LCSR_INTEO_IRQ7EO
364 * TSI148_LCSR_INTEO_IRQ6EO
365 * TSI148_LCSR_INTEO_IRQ5EO
366 * TSI148_LCSR_INTEO_IRQ4EO
367 * TSI148_LCSR_INTEO_IRQ3EO
368 * TSI148_LCSR_INTEO_IRQ2EO
369 * TSI148_LCSR_INTEO_IRQ1EO
370 */
371
372 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
373 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
374
375 return 0;
376 }
377
378 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
379 struct pci_dev *pdev)
380 {
381 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
382
383 /* Turn off interrupts */
384 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
385 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
386
387 /* Clear all interrupts */
388 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
389
390 /* Detach interrupt handler */
391 free_irq(pdev->irq, tsi148_bridge);
392 }
393
394 /*
395  * Check to see if an IACK has been received, return true (1) or false (0).
396 */
397 static int tsi148_iack_received(struct tsi148_driver *bridge)
398 {
399 u32 tmp;
400
401 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
402
403 if (tmp & TSI148_LCSR_VICR_IRQS)
404 return 0;
405 else
406 return 1;
407 }
408
409 /*
410 * Configure VME interrupt
411 */
412 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
413 int state, int sync)
414 {
415 struct pci_dev *pdev;
416 u32 tmp;
417 struct tsi148_driver *bridge;
418
419 bridge = tsi148_bridge->driver_priv;
420
421 /* We need to do the ordering differently for enabling and disabling */
422 if (state == 0) {
423 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
424 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
425 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
426
427 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
428 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
429 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
430
431 if (sync != 0) {
432 pdev = container_of(tsi148_bridge->parent,
433 struct pci_dev, dev);
434
435 synchronize_irq(pdev->irq);
436 }
437 } else {
438 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
439 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
440 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
441
442 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
443 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
444 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
445 }
446 }
447
448 /*
449 * Generate a VME bus interrupt at the requested level & vector. Wait for
450 * interrupt to be acked.
451 */
452 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
453 int statid)
454 {
455 u32 tmp;
456 struct tsi148_driver *bridge;
457
458 bridge = tsi148_bridge->driver_priv;
459
460 mutex_lock(&bridge->vme_int);
461
462 /* Read VICR register */
463 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
464
465 /* Set Status/ID */
466 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
467 (statid & TSI148_LCSR_VICR_STID_M);
468 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
469
470 /* Assert VMEbus IRQ */
471 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
472 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
473
474 /* XXX Consider implementing a timeout? */
475 wait_event_interruptible(bridge->iack_queue,
476 tsi148_iack_received(bridge));
477
478 mutex_unlock(&bridge->vme_int);
479
480 return 0;
481 }
482
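/*
 * Example (illustrative only, assuming the vme_irq_generate() wrapper in the
 * VME core): a driver wanting to interrupt another board calls the wrapper,
 * which ends up in tsi148_irq_generate() above and blocks until the IACK
 * cycle completes:
 *
 *	// raise a level 3 interrupt with status/ID (vector) 0x20
 *	err = vme_irq_generate(vdev, 3, 0x20);
 */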
483 /*
484 * Find the first error in this address range
485 */
486 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
487 u32 aspace, unsigned long long address, size_t count)
488 {
489 struct list_head *err_pos;
490 struct vme_bus_error *vme_err, *valid = NULL;
491 unsigned long long bound;
492
493 bound = address + count;
494
495 /*
496 * XXX We are currently not looking at the address space when parsing
497 * for errors. This is because parsing the Address Modifier Codes
498 * is going to be quite resource intensive to do properly. We
499 * should be OK just looking at the addresses and this is certainly
500 * much better than what we had before.
501 */
502 err_pos = NULL;
503 /* Iterate through errors */
504 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
505 vme_err = list_entry(err_pos, struct vme_bus_error, list);
506 if ((vme_err->address >= address) &&
507 (vme_err->address < bound)) {
508
509 valid = vme_err;
510 break;
511 }
512 }
513
514 return valid;
515 }
516
517 /*
518 * Clear errors in the provided address range.
519 */
520 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
521 u32 aspace, unsigned long long address, size_t count)
522 {
523 struct list_head *err_pos, *temp;
524 struct vme_bus_error *vme_err;
525 unsigned long long bound;
526
527 bound = address + count;
528
529 /*
530 * XXX We are currently not looking at the address space when parsing
531 * for errors. This is because parsing the Address Modifier Codes
532 * is going to be quite resource intensive to do properly. We
533 * should be OK just looking at the addresses and this is certainly
534 * much better than what we had before.
535 */
536 err_pos = NULL;
537 /* Iterate through errors */
538 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
539 vme_err = list_entry(err_pos, struct vme_bus_error, list);
540
541 if ((vme_err->address >= address) &&
542 (vme_err->address < bound)) {
543
544 list_del(err_pos);
545 kfree(vme_err);
546 }
547 }
548 }
549
550 /*
551 * Initialize a slave window with the requested attributes.
552 */
553 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
554 unsigned long long vme_base, unsigned long long size,
555 dma_addr_t pci_base, u32 aspace, u32 cycle)
556 {
557 unsigned int i, addr = 0, granularity = 0;
558 unsigned int temp_ctl = 0;
559 unsigned int vme_base_low, vme_base_high;
560 unsigned int vme_bound_low, vme_bound_high;
561 unsigned int pci_offset_low, pci_offset_high;
562 unsigned long long vme_bound, pci_offset;
563 struct vme_bridge *tsi148_bridge;
564 struct tsi148_driver *bridge;
565
566 tsi148_bridge = image->parent;
567 bridge = tsi148_bridge->driver_priv;
568
569 i = image->number;
570
571 switch (aspace) {
572 case VME_A16:
573 granularity = 0x10;
574 addr |= TSI148_LCSR_ITAT_AS_A16;
575 break;
576 case VME_A24:
577 granularity = 0x1000;
578 addr |= TSI148_LCSR_ITAT_AS_A24;
579 break;
580 case VME_A32:
581 granularity = 0x10000;
582 addr |= TSI148_LCSR_ITAT_AS_A32;
583 break;
584 case VME_A64:
585 granularity = 0x10000;
586 addr |= TSI148_LCSR_ITAT_AS_A64;
587 break;
588 case VME_CRCSR:
589 case VME_USER1:
590 case VME_USER2:
591 case VME_USER3:
592 case VME_USER4:
593 default:
594 dev_err(tsi148_bridge->parent, "Invalid address space\n");
595 return -EINVAL;
596 break;
597 }
598
599 /* Convert 64-bit variables to 2x 32-bit variables */
600 reg_split(vme_base, &vme_base_high, &vme_base_low);
601
602 /*
603 * Bound address is a valid address for the window, adjust
604 * accordingly
605 */
606 vme_bound = vme_base + size - granularity;
607 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
608 pci_offset = (unsigned long long)pci_base - vme_base;
609 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
610
611 if (vme_base_low & (granularity - 1)) {
612 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
613 return -EINVAL;
614 }
615 if (vme_bound_low & (granularity - 1)) {
616 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
617 return -EINVAL;
618 }
619 if (pci_offset_low & (granularity - 1)) {
620 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
621 "alignment\n");
622 return -EINVAL;
623 }
624
625 /* Disable while we are mucking around */
626 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
627 TSI148_LCSR_OFFSET_ITAT);
628 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
629 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
630 TSI148_LCSR_OFFSET_ITAT);
631
632 /* Setup mapping */
633 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITSAU);
635 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITSAL);
637 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITEAU);
639 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITEAL);
641 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITOFU);
643 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITOFL);
645
646 /* Setup 2eSST speeds */
647 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
648 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
649 case VME_2eSST160:
650 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
651 break;
652 case VME_2eSST267:
653 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
654 break;
655 case VME_2eSST320:
656 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
657 break;
658 }
659
660 /* Setup cycle types */
661 temp_ctl &= ~(0x1F << 7);
662 if (cycle & VME_BLT)
663 temp_ctl |= TSI148_LCSR_ITAT_BLT;
664 if (cycle & VME_MBLT)
665 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
666 if (cycle & VME_2eVME)
667 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
668 if (cycle & VME_2eSST)
669 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
670 if (cycle & VME_2eSSTB)
671 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
672
673 /* Setup address space */
674 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
675 temp_ctl |= addr;
676
677 temp_ctl &= ~0xF;
678 if (cycle & VME_SUPER)
679 		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
680 if (cycle & VME_USER)
681 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
682 if (cycle & VME_PROG)
683 temp_ctl |= TSI148_LCSR_ITAT_PGM;
684 if (cycle & VME_DATA)
685 temp_ctl |= TSI148_LCSR_ITAT_DATA;
686
687 /* Write ctl reg without enable */
688 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
689 TSI148_LCSR_OFFSET_ITAT);
690
691 if (enabled)
692 temp_ctl |= TSI148_LCSR_ITAT_EN;
693
694 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
695 TSI148_LCSR_OFFSET_ITAT);
696
697 return 0;
698 }
699
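/*
 * Example (illustrative sketch, assuming the vme_slave_* and
 * vme_alloc_consistent() helpers in the VME core): mapping a 64 KiB A24 slave
 * window onto a coherent buffer. Note the granularity checks above: for A24
 * the base, bound and PCI offset must all be 0x1000 aligned.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_bus;
 *	void *buf;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT);
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_bus);
 *	err = vme_slave_set(res, 1, 0x400000, 0x10000, buf_bus,
 *			VME_A24, VME_SCT | VME_USER | VME_DATA);
 */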
700 /*
701 * Get slave window configuration.
702 */
703 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
704 unsigned long long *vme_base, unsigned long long *size,
705 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
706 {
707 unsigned int i, granularity = 0, ctl = 0;
708 unsigned int vme_base_low, vme_base_high;
709 unsigned int vme_bound_low, vme_bound_high;
710 unsigned int pci_offset_low, pci_offset_high;
711 unsigned long long vme_bound, pci_offset;
712 struct tsi148_driver *bridge;
713
714 bridge = image->parent->driver_priv;
715
716 i = image->number;
717
718 /* Read registers */
719 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
720 TSI148_LCSR_OFFSET_ITAT);
721
722 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
723 TSI148_LCSR_OFFSET_ITSAU);
724 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITSAL);
726 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITEAU);
728 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITEAL);
730 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITOFU);
732 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITOFL);
734
735 	/* Convert 2x 32-bit variables to 64-bit variables */
736 reg_join(vme_base_high, vme_base_low, vme_base);
737 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
738 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
739
740 *pci_base = (dma_addr_t)vme_base + pci_offset;
741
742 *enabled = 0;
743 *aspace = 0;
744 *cycle = 0;
745
746 if (ctl & TSI148_LCSR_ITAT_EN)
747 *enabled = 1;
748
749 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
750 granularity = 0x10;
751 *aspace |= VME_A16;
752 }
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
754 granularity = 0x1000;
755 *aspace |= VME_A24;
756 }
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
758 granularity = 0x10000;
759 *aspace |= VME_A32;
760 }
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
762 granularity = 0x10000;
763 *aspace |= VME_A64;
764 }
765
766 /* Need granularity before we set the size */
767 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
768
769
770 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
771 *cycle |= VME_2eSST160;
772 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
773 *cycle |= VME_2eSST267;
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
775 *cycle |= VME_2eSST320;
776
777 if (ctl & TSI148_LCSR_ITAT_BLT)
778 *cycle |= VME_BLT;
779 if (ctl & TSI148_LCSR_ITAT_MBLT)
780 *cycle |= VME_MBLT;
781 if (ctl & TSI148_LCSR_ITAT_2eVME)
782 *cycle |= VME_2eVME;
783 if (ctl & TSI148_LCSR_ITAT_2eSST)
784 *cycle |= VME_2eSST;
785 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
786 *cycle |= VME_2eSSTB;
787
788 if (ctl & TSI148_LCSR_ITAT_SUPR)
789 *cycle |= VME_SUPER;
790 if (ctl & TSI148_LCSR_ITAT_NPRIV)
791 *cycle |= VME_USER;
792 if (ctl & TSI148_LCSR_ITAT_PGM)
793 *cycle |= VME_PROG;
794 if (ctl & TSI148_LCSR_ITAT_DATA)
795 *cycle |= VME_DATA;
796
797 return 0;
798 }
799
800 /*
801 * Allocate and map PCI Resource
802 */
803 static int tsi148_alloc_resource(struct vme_master_resource *image,
804 unsigned long long size)
805 {
806 unsigned long long existing_size;
807 int retval = 0;
808 struct pci_dev *pdev;
809 struct vme_bridge *tsi148_bridge;
810
811 tsi148_bridge = image->parent;
812
813 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
814
815 existing_size = (unsigned long long)(image->bus_resource.end -
816 image->bus_resource.start);
817
818 /* If the existing size is OK, return */
819 if ((size != 0) && (existing_size == (size - 1)))
820 return 0;
821
822 if (existing_size != 0) {
823 iounmap(image->kern_base);
824 image->kern_base = NULL;
825 kfree(image->bus_resource.name);
826 release_resource(&image->bus_resource);
827 memset(&image->bus_resource, 0, sizeof(struct resource));
828 }
829
830 /* Exit here if size is zero */
831 if (size == 0)
832 return 0;
833
834 if (image->bus_resource.name == NULL) {
835 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
836 if (image->bus_resource.name == NULL) {
837 dev_err(tsi148_bridge->parent, "Unable to allocate "
838 "memory for resource name\n");
839 retval = -ENOMEM;
840 goto err_name;
841 }
842 }
843
844 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
845 image->number);
846
847 image->bus_resource.start = 0;
848 image->bus_resource.end = (unsigned long)size;
849 image->bus_resource.flags = IORESOURCE_MEM;
850
851 retval = pci_bus_alloc_resource(pdev->bus,
852 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
853 0, NULL, NULL);
854 if (retval) {
855 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
856 "resource for window %d size 0x%lx start 0x%lx\n",
857 image->number, (unsigned long)size,
858 (unsigned long)image->bus_resource.start);
859 goto err_resource;
860 }
861
862 image->kern_base = ioremap_nocache(
863 image->bus_resource.start, size);
864 if (image->kern_base == NULL) {
865 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
866 retval = -ENOMEM;
867 goto err_remap;
868 }
869
870 return 0;
871
872 err_remap:
873 release_resource(&image->bus_resource);
874 err_resource:
875 kfree(image->bus_resource.name);
876 memset(&image->bus_resource, 0, sizeof(struct resource));
877 err_name:
878 return retval;
879 }
880
881 /*
882 * Free and unmap PCI Resource
883 */
884 static void tsi148_free_resource(struct vme_master_resource *image)
885 {
886 iounmap(image->kern_base);
887 image->kern_base = NULL;
888 release_resource(&image->bus_resource);
889 kfree(image->bus_resource.name);
890 memset(&image->bus_resource, 0, sizeof(struct resource));
891 }
892
893 /*
894 * Set the attributes of an outbound window.
895 */
896 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
897 unsigned long long vme_base, unsigned long long size, u32 aspace,
898 u32 cycle, u32 dwidth)
899 {
900 int retval = 0;
901 unsigned int i;
902 unsigned int temp_ctl = 0;
903 unsigned int pci_base_low, pci_base_high;
904 unsigned int pci_bound_low, pci_bound_high;
905 unsigned int vme_offset_low, vme_offset_high;
906 unsigned long long pci_bound, vme_offset, pci_base;
907 struct vme_bridge *tsi148_bridge;
908 struct tsi148_driver *bridge;
909
910 tsi148_bridge = image->parent;
911
912 bridge = tsi148_bridge->driver_priv;
913
914 /* Verify input data */
915 if (vme_base & 0xFFFF) {
916 dev_err(tsi148_bridge->parent, "Invalid VME Window "
917 "alignment\n");
918 retval = -EINVAL;
919 goto err_window;
920 }
921
922 if ((size == 0) && (enabled != 0)) {
923 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
924 "enabled windows\n");
925 retval = -EINVAL;
926 goto err_window;
927 }
928
929 spin_lock(&image->lock);
930
931 /* Let's allocate the resource here rather than further up the stack as
932 * it avoids pushing loads of bus dependent stuff up the stack. If size
933 * is zero, any existing resource will be freed.
934 */
935 retval = tsi148_alloc_resource(image, size);
936 if (retval) {
937 spin_unlock(&image->lock);
938 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
939 "resource\n");
940 goto err_res;
941 }
942
943 if (size == 0) {
944 pci_base = 0;
945 pci_bound = 0;
946 vme_offset = 0;
947 } else {
948 pci_base = (unsigned long long)image->bus_resource.start;
949
950 /*
951 * Bound address is a valid address for the window, adjust
952 * according to window granularity.
953 */
954 pci_bound = pci_base + (size - 0x10000);
955 vme_offset = vme_base - pci_base;
956 }
957
958 /* Convert 64-bit variables to 2x 32-bit variables */
959 reg_split(pci_base, &pci_base_high, &pci_base_low);
960 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
961 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
962
963 if (pci_base_low & 0xFFFF) {
964 spin_unlock(&image->lock);
965 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
966 retval = -EINVAL;
967 goto err_gran;
968 }
969 if (pci_bound_low & 0xFFFF) {
970 spin_unlock(&image->lock);
971 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
972 retval = -EINVAL;
973 goto err_gran;
974 }
975 if (vme_offset_low & 0xFFFF) {
976 spin_unlock(&image->lock);
977 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
978 "alignment\n");
979 retval = -EINVAL;
980 goto err_gran;
981 }
982
983 i = image->number;
984
985 /* Disable while we are mucking around */
986 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
987 TSI148_LCSR_OFFSET_OTAT);
988 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
989 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
990 TSI148_LCSR_OFFSET_OTAT);
991
992 /* Setup 2eSST speeds */
993 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
994 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
995 case VME_2eSST160:
996 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
997 break;
998 case VME_2eSST267:
999 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1000 break;
1001 case VME_2eSST320:
1002 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1003 break;
1004 }
1005
1006 /* Setup cycle types */
1007 if (cycle & VME_BLT) {
1008 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1009 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1010 }
1011 if (cycle & VME_MBLT) {
1012 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1013 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1014 }
1015 if (cycle & VME_2eVME) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1018 }
1019 if (cycle & VME_2eSST) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1022 }
1023 if (cycle & VME_2eSSTB) {
1024 dev_warn(tsi148_bridge->parent, "Currently not setting "
1025 "Broadcast Select Registers\n");
1026 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1027 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1028 }
1029
1030 /* Setup data width */
1031 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1032 switch (dwidth) {
1033 case VME_D16:
1034 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1035 break;
1036 case VME_D32:
1037 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1038 break;
1039 default:
1040 spin_unlock(&image->lock);
1041 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1042 retval = -EINVAL;
1043 goto err_dwidth;
1044 }
1045
1046 /* Setup address space */
1047 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1048 switch (aspace) {
1049 case VME_A16:
1050 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1051 break;
1052 case VME_A24:
1053 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1054 break;
1055 case VME_A32:
1056 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1057 break;
1058 case VME_A64:
1059 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1060 break;
1061 case VME_CRCSR:
1062 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1063 break;
1064 case VME_USER1:
1065 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1066 break;
1067 case VME_USER2:
1068 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1069 break;
1070 case VME_USER3:
1071 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1072 break;
1073 case VME_USER4:
1074 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1075 break;
1076 default:
1077 spin_unlock(&image->lock);
1078 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1079 retval = -EINVAL;
1080 goto err_aspace;
1081 break;
1082 }
1083
1084 temp_ctl &= ~(3<<4);
1085 if (cycle & VME_SUPER)
1086 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1087 if (cycle & VME_PROG)
1088 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1089
1090 /* Setup mapping */
1091 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1092 TSI148_LCSR_OFFSET_OTSAU);
1093 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1094 TSI148_LCSR_OFFSET_OTSAL);
1095 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1096 TSI148_LCSR_OFFSET_OTEAU);
1097 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1098 TSI148_LCSR_OFFSET_OTEAL);
1099 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTOFU);
1101 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1102 TSI148_LCSR_OFFSET_OTOFL);
1103
1104 /* Write ctl reg without enable */
1105 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1106 TSI148_LCSR_OFFSET_OTAT);
1107
1108 if (enabled)
1109 temp_ctl |= TSI148_LCSR_OTAT_EN;
1110
1111 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1112 TSI148_LCSR_OFFSET_OTAT);
1113
1114 spin_unlock(&image->lock);
1115 return 0;
1116
1117 err_aspace:
1118 err_dwidth:
1119 err_gran:
1120 tsi148_free_resource(image);
1121 err_res:
1122 err_window:
1123 return retval;
1124
1125 }
1126
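/*
 * Example (illustrative sketch, assuming the vme_master_* helpers in the VME
 * core): outbound windows on the TSI148 have a fixed 64 KiB granularity, so
 * vme_base and size must both be 0x10000 aligned or the checks above return
 * -EINVAL.
 *
 *	struct vme_resource *res;
 *
 *	res = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
 *	err = vme_master_set(res, 1, 0x20000000, 0x10000, VME_A32,
 *			VME_SCT | VME_USER | VME_DATA, VME_D32);
 */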
1127 /*
1128  * Get the attributes of an outbound window.
1129 *
1130 * XXX Not parsing prefetch information.
1131 */
1132 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1133 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1134 u32 *cycle, u32 *dwidth)
1135 {
1136 unsigned int i, ctl;
1137 unsigned int pci_base_low, pci_base_high;
1138 unsigned int pci_bound_low, pci_bound_high;
1139 unsigned int vme_offset_low, vme_offset_high;
1140
1141 unsigned long long pci_base, pci_bound, vme_offset;
1142 struct tsi148_driver *bridge;
1143
1144 bridge = image->parent->driver_priv;
1145
1146 i = image->number;
1147
1148 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1149 TSI148_LCSR_OFFSET_OTAT);
1150
1151 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1152 TSI148_LCSR_OFFSET_OTSAU);
1153 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1154 TSI148_LCSR_OFFSET_OTSAL);
1155 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTEAU);
1157 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1158 TSI148_LCSR_OFFSET_OTEAL);
1159 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTOFU);
1161 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1162 TSI148_LCSR_OFFSET_OTOFL);
1163
1164 	/* Convert 2x 32-bit variables to 64-bit variables */
1165 reg_join(pci_base_high, pci_base_low, &pci_base);
1166 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1167 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1168
1169 *vme_base = pci_base + vme_offset;
1170 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1171
1172 *enabled = 0;
1173 *aspace = 0;
1174 *cycle = 0;
1175 *dwidth = 0;
1176
1177 if (ctl & TSI148_LCSR_OTAT_EN)
1178 *enabled = 1;
1179
1180 /* Setup address space */
1181 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1182 *aspace |= VME_A16;
1183 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1184 *aspace |= VME_A24;
1185 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1186 *aspace |= VME_A32;
1187 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1188 *aspace |= VME_A64;
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1190 *aspace |= VME_CRCSR;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1192 *aspace |= VME_USER1;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1194 *aspace |= VME_USER2;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1196 *aspace |= VME_USER3;
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1198 *aspace |= VME_USER4;
1199
1200 /* Setup 2eSST speeds */
1201 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1202 *cycle |= VME_2eSST160;
1203 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1204 *cycle |= VME_2eSST267;
1205 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1206 *cycle |= VME_2eSST320;
1207
1208 /* Setup cycle types */
1209 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1210 *cycle |= VME_SCT;
1211 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1212 *cycle |= VME_BLT;
1213 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1214 *cycle |= VME_MBLT;
1215 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1216 *cycle |= VME_2eVME;
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1218 *cycle |= VME_2eSST;
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1220 *cycle |= VME_2eSSTB;
1221
1222 if (ctl & TSI148_LCSR_OTAT_SUP)
1223 *cycle |= VME_SUPER;
1224 else
1225 *cycle |= VME_USER;
1226
1227 if (ctl & TSI148_LCSR_OTAT_PGM)
1228 *cycle |= VME_PROG;
1229 else
1230 *cycle |= VME_DATA;
1231
1232 /* Setup data width */
1233 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1234 *dwidth = VME_D16;
1235 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1236 *dwidth = VME_D32;
1237
1238 return 0;
1239 }
1240
1241
1242 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1243 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1244 u32 *cycle, u32 *dwidth)
1245 {
1246 int retval;
1247
1248 spin_lock(&image->lock);
1249
1250 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1251 cycle, dwidth);
1252
1253 spin_unlock(&image->lock);
1254
1255 return retval;
1256 }
1257
1258 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1259 size_t count, loff_t offset)
1260 {
1261 int retval, enabled;
1262 unsigned long long vme_base, size;
1263 u32 aspace, cycle, dwidth;
1264 struct vme_bus_error *vme_err = NULL;
1265 struct vme_bridge *tsi148_bridge;
1266
1267 tsi148_bridge = image->parent;
1268
1269 spin_lock(&image->lock);
1270
1271 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1272 retval = count;
1273
1274 if (!err_chk)
1275 goto skip_chk;
1276
1277 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1278 &dwidth);
1279
1280 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1281 count);
1282 if (vme_err != NULL) {
1283 		dev_err(image->parent->parent, "First VME read error detected "
1284 			"at address 0x%llx\n", vme_err->address);
1285 		retval = vme_err->address - (vme_base + offset);
1286 		/* Clear down saved errors in this address range */
1287 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1288 count);
1289 }
1290
1291 skip_chk:
1292 spin_unlock(&image->lock);
1293
1294 return retval;
1295 }
1296
1297
1298 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1299 size_t count, loff_t offset)
1300 {
1301 int retval = 0, enabled;
1302 unsigned long long vme_base, size;
1303 u32 aspace, cycle, dwidth;
1304
1305 struct vme_bus_error *vme_err = NULL;
1306 struct vme_bridge *tsi148_bridge;
1307 struct tsi148_driver *bridge;
1308
1309 tsi148_bridge = image->parent;
1310
1311 bridge = tsi148_bridge->driver_priv;
1312
1313 spin_lock(&image->lock);
1314
1315 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1316 retval = count;
1317
1318 /*
1319 * Writes are posted. We need to do a read on the VME bus to flush out
1320 * all of the writes before we check for errors. We can't guarantee
1321 * that reading the data we have just written is safe. It is believed
1322 * that there isn't any read, write re-ordering, so we can read any
1323 * location in VME space, so lets read the Device ID from the tsi148's
1324 * own registers as mapped into CR/CSR space.
1325 *
1326 * We check for saved errors in the written address range/space.
1327 */
1328
1329 if (!err_chk)
1330 goto skip_chk;
1331
1332 /*
1333 * Get window info first, to maximise the time that the buffers may
1334 	 * flush on their own
1335 */
1336 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1337 &dwidth);
1338
1339 ioread16(bridge->flush_image->kern_base + 0x7F000);
1340
1341 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1342 count);
1343 if (vme_err != NULL) {
1344 		dev_warn(tsi148_bridge->parent, "First VME write error detected"
1345 			" at address 0x%llx\n", vme_err->address);
1346 		retval = vme_err->address - (vme_base + offset);
1347 		/* Clear down saved errors in this address range */
1348 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1349 count);
1350 }
1351
1352 skip_chk:
1353 spin_unlock(&image->lock);
1354
1355 return retval;
1356 }
1357
1358 /*
1359 * Perform an RMW cycle on the VME bus.
1360 *
1361 * Requires a previously configured master window, returns final value.
1362 */
1363 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1364 unsigned int mask, unsigned int compare, unsigned int swap,
1365 loff_t offset)
1366 {
1367 unsigned long long pci_addr;
1368 unsigned int pci_addr_high, pci_addr_low;
1369 u32 tmp, result;
1370 int i;
1371 struct tsi148_driver *bridge;
1372
1373 bridge = image->parent->driver_priv;
1374
1375 /* Find the PCI address that maps to the desired VME address */
1376 i = image->number;
1377
1378 /* Locking as we can only do one of these at a time */
1379 mutex_lock(&bridge->vme_rmw);
1380
1381 /* Lock image */
1382 spin_lock(&image->lock);
1383
1384 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1385 TSI148_LCSR_OFFSET_OTSAU);
1386 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1387 TSI148_LCSR_OFFSET_OTSAL);
1388
1389 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1390 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1391
1392 /* Configure registers */
1393 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1394 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1395 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1396 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1397 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1398
1399 /* Enable RMW */
1400 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1401 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1402 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1403
1404 /* Kick process off with a read to the required address. */
1405 result = ioread32be(image->kern_base + offset);
1406
1407 /* Disable RMW */
1408 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1409 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1410 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1411
1412 spin_unlock(&image->lock);
1413
1414 mutex_unlock(&bridge->vme_rmw);
1415
1416 return result;
1417 }
1418
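/*
 * Worked example (added commentary; RMW semantics as understood from the
 * registers programmed above, not verified against the TSI148 manual): for
 * each bit set in 'mask', the value read from the bus is compared with the
 * corresponding bit of 'compare' and, where it matches, replaced by the bit
 * from 'swap', all within one locked cycle. A test-and-set style semaphore
 * on bit 0 of a 32-bit location could therefore look like:
 *
 *	// returned value has bit 0 clear if we just acquired the lock
 *	old = vme_master_rmw(res, 0x1, 0x0, 0x1, offset);
 */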
1419 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1420 u32 aspace, u32 cycle, u32 dwidth)
1421 {
1422 u32 val;
1423
1424 val = be32_to_cpu(*attr);
1425
1426 /* Setup 2eSST speeds */
1427 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1428 case VME_2eSST160:
1429 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1430 break;
1431 case VME_2eSST267:
1432 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1433 break;
1434 case VME_2eSST320:
1435 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1436 break;
1437 }
1438
1439 /* Setup cycle types */
1440 if (cycle & VME_SCT)
1441 val |= TSI148_LCSR_DSAT_TM_SCT;
1442
1443 if (cycle & VME_BLT)
1444 val |= TSI148_LCSR_DSAT_TM_BLT;
1445
1446 if (cycle & VME_MBLT)
1447 val |= TSI148_LCSR_DSAT_TM_MBLT;
1448
1449 if (cycle & VME_2eVME)
1450 val |= TSI148_LCSR_DSAT_TM_2eVME;
1451
1452 if (cycle & VME_2eSST)
1453 val |= TSI148_LCSR_DSAT_TM_2eSST;
1454
1455 if (cycle & VME_2eSSTB) {
1456 dev_err(dev, "Currently not setting Broadcast Select "
1457 "Registers\n");
1458 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1459 }
1460
1461 /* Setup data width */
1462 switch (dwidth) {
1463 case VME_D16:
1464 val |= TSI148_LCSR_DSAT_DBW_16;
1465 break;
1466 case VME_D32:
1467 val |= TSI148_LCSR_DSAT_DBW_32;
1468 break;
1469 default:
1470 dev_err(dev, "Invalid data width\n");
1471 return -EINVAL;
1472 }
1473
1474 /* Setup address space */
1475 switch (aspace) {
1476 case VME_A16:
1477 val |= TSI148_LCSR_DSAT_AMODE_A16;
1478 break;
1479 case VME_A24:
1480 val |= TSI148_LCSR_DSAT_AMODE_A24;
1481 break;
1482 case VME_A32:
1483 val |= TSI148_LCSR_DSAT_AMODE_A32;
1484 break;
1485 case VME_A64:
1486 val |= TSI148_LCSR_DSAT_AMODE_A64;
1487 break;
1488 case VME_CRCSR:
1489 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1490 break;
1491 case VME_USER1:
1492 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1493 break;
1494 case VME_USER2:
1495 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1496 break;
1497 case VME_USER3:
1498 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1499 break;
1500 case VME_USER4:
1501 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1502 break;
1503 default:
1504 dev_err(dev, "Invalid address space\n");
1505 return -EINVAL;
1506 break;
1507 }
1508
1509 if (cycle & VME_SUPER)
1510 val |= TSI148_LCSR_DSAT_SUP;
1511 if (cycle & VME_PROG)
1512 val |= TSI148_LCSR_DSAT_PGM;
1513
1514 *attr = cpu_to_be32(val);
1515
1516 return 0;
1517 }
1518
1519 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1520 u32 aspace, u32 cycle, u32 dwidth)
1521 {
1522 u32 val;
1523
1524 val = be32_to_cpu(*attr);
1525
1526 /* Setup 2eSST speeds */
1527 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1528 case VME_2eSST160:
1529 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1530 break;
1531 case VME_2eSST267:
1532 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1533 break;
1534 case VME_2eSST320:
1535 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1536 break;
1537 }
1538
1539 /* Setup cycle types */
1540 if (cycle & VME_SCT)
1541 val |= TSI148_LCSR_DDAT_TM_SCT;
1542
1543 if (cycle & VME_BLT)
1544 val |= TSI148_LCSR_DDAT_TM_BLT;
1545
1546 if (cycle & VME_MBLT)
1547 val |= TSI148_LCSR_DDAT_TM_MBLT;
1548
1549 if (cycle & VME_2eVME)
1550 val |= TSI148_LCSR_DDAT_TM_2eVME;
1551
1552 if (cycle & VME_2eSST)
1553 val |= TSI148_LCSR_DDAT_TM_2eSST;
1554
1555 if (cycle & VME_2eSSTB) {
1556 dev_err(dev, "Currently not setting Broadcast Select "
1557 "Registers\n");
1558 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1559 }
1560
1561 /* Setup data width */
1562 switch (dwidth) {
1563 case VME_D16:
1564 val |= TSI148_LCSR_DDAT_DBW_16;
1565 break;
1566 case VME_D32:
1567 val |= TSI148_LCSR_DDAT_DBW_32;
1568 break;
1569 default:
1570 dev_err(dev, "Invalid data width\n");
1571 return -EINVAL;
1572 }
1573
1574 /* Setup address space */
1575 switch (aspace) {
1576 case VME_A16:
1577 val |= TSI148_LCSR_DDAT_AMODE_A16;
1578 break;
1579 case VME_A24:
1580 val |= TSI148_LCSR_DDAT_AMODE_A24;
1581 break;
1582 case VME_A32:
1583 val |= TSI148_LCSR_DDAT_AMODE_A32;
1584 break;
1585 case VME_A64:
1586 val |= TSI148_LCSR_DDAT_AMODE_A64;
1587 break;
1588 case VME_CRCSR:
1589 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1590 break;
1591 case VME_USER1:
1592 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1593 break;
1594 case VME_USER2:
1595 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1596 break;
1597 case VME_USER3:
1598 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1599 break;
1600 case VME_USER4:
1601 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1602 break;
1603 default:
1604 dev_err(dev, "Invalid address space\n");
1605 return -EINVAL;
1606 break;
1607 }
1608
1609 if (cycle & VME_SUPER)
1610 val |= TSI148_LCSR_DDAT_SUP;
1611 if (cycle & VME_PROG)
1612 val |= TSI148_LCSR_DDAT_PGM;
1613
1614 *attr = cpu_to_be32(val);
1615
1616 return 0;
1617 }
1618
1619 /*
1620 * Add a link list descriptor to the list
1621 *
1622 * Note: DMA engine expects the DMA descriptor to be big endian.
1623 */
1624 static int tsi148_dma_list_add(struct vme_dma_list *list,
1625 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1626 {
1627 struct tsi148_dma_entry *entry, *prev;
1628 u32 address_high, address_low, val;
1629 struct vme_dma_pattern *pattern_attr;
1630 struct vme_dma_pci *pci_attr;
1631 struct vme_dma_vme *vme_attr;
1632 int retval = 0;
1633 struct vme_bridge *tsi148_bridge;
1634
1635 tsi148_bridge = list->parent->parent;
1636
1637 /* Descriptor must be aligned on 64-bit boundaries */
1638 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1639 if (entry == NULL) {
1640 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1641 "dma resource structure\n");
1642 retval = -ENOMEM;
1643 goto err_mem;
1644 }
1645
1646 /* Test descriptor alignment */
1647 if ((unsigned long)&entry->descriptor & 0x7) {
1648 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1649 "byte boundary as required: %p\n",
1650 &entry->descriptor);
1651 retval = -EINVAL;
1652 goto err_align;
1653 }
1654
1655 /* Given we are going to fill out the structure, we probably don't
1656 * need to zero it, but better safe than sorry for now.
1657 */
1658 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1659
1660 /* Fill out source part */
1661 switch (src->type) {
1662 case VME_DMA_PATTERN:
1663 pattern_attr = src->private;
1664
1665 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1666
1667 val = TSI148_LCSR_DSAT_TYP_PAT;
1668
1669 /* Default behaviour is 32 bit pattern */
1670 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1671 val |= TSI148_LCSR_DSAT_PSZ;
1672
1673 /* It seems that the default behaviour is to increment */
1674 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1675 val |= TSI148_LCSR_DSAT_NIN;
1676 entry->descriptor.dsat = cpu_to_be32(val);
1677 break;
1678 case VME_DMA_PCI:
1679 pci_attr = src->private;
1680
1681 reg_split((unsigned long long)pci_attr->address, &address_high,
1682 &address_low);
1683 entry->descriptor.dsau = cpu_to_be32(address_high);
1684 entry->descriptor.dsal = cpu_to_be32(address_low);
1685 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1686 break;
1687 case VME_DMA_VME:
1688 vme_attr = src->private;
1689
1690 reg_split((unsigned long long)vme_attr->address, &address_high,
1691 &address_low);
1692 entry->descriptor.dsau = cpu_to_be32(address_high);
1693 entry->descriptor.dsal = cpu_to_be32(address_low);
1694 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1695
1696 retval = tsi148_dma_set_vme_src_attributes(
1697 tsi148_bridge->parent, &entry->descriptor.dsat,
1698 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1699 if (retval < 0)
1700 goto err_source;
1701 break;
1702 default:
1703 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1704 retval = -EINVAL;
1705 goto err_source;
1706 break;
1707 }
1708
1709 /* Assume last link - this will be over-written by adding another */
1710 entry->descriptor.dnlau = cpu_to_be32(0);
1711 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1712
1713 /* Fill out destination part */
1714 switch (dest->type) {
1715 case VME_DMA_PCI:
1716 pci_attr = dest->private;
1717
1718 reg_split((unsigned long long)pci_attr->address, &address_high,
1719 &address_low);
1720 entry->descriptor.ddau = cpu_to_be32(address_high);
1721 entry->descriptor.ddal = cpu_to_be32(address_low);
1722 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1723 break;
1724 case VME_DMA_VME:
1725 vme_attr = dest->private;
1726
1727 reg_split((unsigned long long)vme_attr->address, &address_high,
1728 &address_low);
1729 entry->descriptor.ddau = cpu_to_be32(address_high);
1730 entry->descriptor.ddal = cpu_to_be32(address_low);
1731 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1732
1733 retval = tsi148_dma_set_vme_dest_attributes(
1734 tsi148_bridge->parent, &entry->descriptor.ddat,
1735 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1736 if (retval < 0)
1737 goto err_dest;
1738 break;
1739 default:
1740 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1741 retval = -EINVAL;
1742 goto err_dest;
1743 break;
1744 }
1745
1746 /* Fill out count */
1747 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1748
1749 /* Add to list */
1750 list_add_tail(&entry->list, &list->entries);
1751
1752 /* Fill out previous descriptors "Next Address" */
1753 if (entry->list.prev != &list->entries) {
1754 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1755 list);
1756 /* We need the bus address for the pointer */
1757 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1758 &entry->descriptor,
1759 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1760
1761 reg_split((unsigned long long)entry->dma_handle, &address_high,
1762 &address_low);
1763 entry->descriptor.dnlau = cpu_to_be32(address_high);
1764 entry->descriptor.dnlal = cpu_to_be32(address_low);
1765
1766 }
1767
1768 return 0;
1769
1770 err_dest:
1771 err_source:
1772 err_align:
1773 kfree(entry);
1774 err_mem:
1775 return retval;
1776 }
1777
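/*
 * Example (illustrative sketch, assuming the vme_dma_* list API in the VME
 * core; buf_bus is a previously mapped DMA address): build and run a
 * single-entry list copying 4 KiB from an A32 VME address into PCI memory.
 * Each vme_dma_list_add() call lands in tsi148_dma_list_add() above, which
 * builds one big-endian hardware descriptor per entry.
 *
 *	struct vme_resource *dma_res;
 *	struct vme_dma_list *list;
 *	struct vme_dma_attr *src, *dst;
 *
 *	dma_res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
 *	list = vme_new_dma_list(dma_res);
 *	src = vme_dma_vme_attribute(0x20000000, VME_A32,
 *			VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	dst = vme_dma_pci_attribute(buf_bus);
 *	err = vme_dma_list_add(list, src, dst, 0x1000);
 *	err = vme_dma_list_exec(list);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dst);
 *	vme_dma_list_free(list);
 */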
1778 /*
1779  * Check to see if the provided DMA channel is busy: returns 0 if busy, 1 if idle.
1780 */
1781 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1782 {
1783 u32 tmp;
1784 struct tsi148_driver *bridge;
1785
1786 bridge = tsi148_bridge->driver_priv;
1787
1788 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1789 TSI148_LCSR_OFFSET_DSTA);
1790
1791 if (tmp & TSI148_LCSR_DSTA_BSY)
1792 return 0;
1793 else
1794 return 1;
1795
1796 }
1797
1798 /*
1799 * Execute a previously generated link list
1800 *
1801 * XXX Need to provide control register configuration.
1802 */
1803 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1804 {
1805 struct vme_dma_resource *ctrlr;
1806 int channel, retval = 0;
1807 struct tsi148_dma_entry *entry;
1808 u32 bus_addr_high, bus_addr_low;
1809 u32 val, dctlreg = 0;
1810 struct vme_bridge *tsi148_bridge;
1811 struct tsi148_driver *bridge;
1812
1813 ctrlr = list->parent;
1814
1815 tsi148_bridge = ctrlr->parent;
1816
1817 bridge = tsi148_bridge->driver_priv;
1818
1819 mutex_lock(&ctrlr->mtx);
1820
1821 channel = ctrlr->number;
1822
1823 if (!list_empty(&ctrlr->running)) {
1824 /*
1825 * XXX We have an active DMA transfer and currently haven't
1826 * sorted out the mechanism for "pending" DMA transfers.
1827 * Return busy.
1828 */
1829 /* Need to add to pending here */
1830 mutex_unlock(&ctrlr->mtx);
1831 return -EBUSY;
1832 } else {
1833 list_add(&list->list, &ctrlr->running);
1834 }
1835
1836 /* Get first bus address and write into registers */
1837 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1838 list);
1839
1840 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1841 &entry->descriptor,
1842 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1843
1844 mutex_unlock(&ctrlr->mtx);
1845
1846 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1847
1848 iowrite32be(bus_addr_high, bridge->base +
1849 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1850 iowrite32be(bus_addr_low, bridge->base +
1851 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1852
1853 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1854 TSI148_LCSR_OFFSET_DCTL);
1855
1856 /* Start the operation */
1857 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1858 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1859
1860 wait_event_interruptible(bridge->dma_queue[channel],
1861 tsi148_dma_busy(ctrlr->parent, channel));
1862
1863 /*
1864 * Read status register, this register is valid until we kick off a
1865 * new transfer.
1866 */
1867 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1868 TSI148_LCSR_OFFSET_DSTA);
1869
1870 if (val & TSI148_LCSR_DSTA_VBE) {
1871 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1872 retval = -EIO;
1873 }
1874
1875 /* Remove list from running list */
1876 mutex_lock(&ctrlr->mtx);
1877 list_del(&list->list);
1878 mutex_unlock(&ctrlr->mtx);
1879
1880 return retval;
1881 }
1882
1883 /*
1884 * Clean up a previously generated link list
1885 *
1886  * Cleanup is kept as a separate function; don't assume that the chain can't be reused.
1887 */
1888 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1889 {
1890 struct list_head *pos, *temp;
1891 struct tsi148_dma_entry *entry;
1892
1893 struct vme_bridge *tsi148_bridge = list->parent->parent;
1894
1895 /* detach and free each entry */
1896 list_for_each_safe(pos, temp, &list->entries) {
1897 list_del(pos);
1898 entry = list_entry(pos, struct tsi148_dma_entry, list);
1899
1900 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1901 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1902 kfree(entry);
1903 }
1904
1905 return 0;
1906 }
1907
1908 /*
1909 * All 4 location monitors reside at the same base - this is therefore a
1910 * system wide configuration.
1911 *
1912 * This does not enable the LM monitor - that should be done when the first
1913 * callback is attached and disabled when the last callback is removed.
1914 */
1915 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1916 u32 aspace, u32 cycle)
1917 {
1918 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1919 int i;
1920 struct vme_bridge *tsi148_bridge;
1921 struct tsi148_driver *bridge;
1922
1923 tsi148_bridge = lm->parent;
1924
1925 bridge = tsi148_bridge->driver_priv;
1926
1927 mutex_lock(&lm->mtx);
1928
1929 /* If we already have a callback attached, we can't move it! */
1930 for (i = 0; i < lm->monitors; i++) {
1931 if (bridge->lm_callback[i] != NULL) {
1932 mutex_unlock(&lm->mtx);
1933 dev_err(tsi148_bridge->parent, "Location monitor "
1934 "callback attached, can't reset\n");
1935 return -EBUSY;
1936 }
1937 }
1938
1939 switch (aspace) {
1940 case VME_A16:
1941 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1942 break;
1943 case VME_A24:
1944 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1945 break;
1946 case VME_A32:
1947 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1948 break;
1949 case VME_A64:
1950 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1951 break;
1952 default:
1953 mutex_unlock(&lm->mtx);
1954 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1955 return -EINVAL;
1957 }
1958
1959 if (cycle & VME_SUPER)
1960 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1961 if (cycle & VME_USER)
1962 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1963 if (cycle & VME_PROG)
1964 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1965 if (cycle & VME_DATA)
1966 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1967
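/*
 * The 64-bit comparator base is split across the LMBAU/LMBAL register pair;
 * the address space and cycle attributes go into LMAT.
 */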
1968 reg_split(lm_base, &lm_base_high, &lm_base_low);
1969
1970 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1971 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1972 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1973
1974 mutex_unlock(&lm->mtx);
1975
1976 return 0;
1977 }
1978
1979 /* Get configuration of the location monitor and return whether it is enabled
1980 * or disabled.
1981 */
1982 static int tsi148_lm_get(struct vme_lm_resource *lm,
1983 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1984 {
1985 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1986 struct tsi148_driver *bridge;
1987
1988 bridge = lm->parent->driver_priv;
1989
1990 mutex_lock(&lm->mtx);
1991
1992 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1993 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1994 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1995
1996 reg_join(lm_base_high, lm_base_low, lm_base);
1997
1998 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1999 enabled = 1;
2000
2001 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2002 *aspace |= VME_A16;
2003
2004 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2005 *aspace |= VME_A24;
2006
2007 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2008 *aspace |= VME_A32;
2009
2010 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2011 *aspace |= VME_A64;
2012 
2014 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2015 *cycle |= VME_SUPER;
2016 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2017 *cycle |= VME_USER;
2018 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2019 *cycle |= VME_PROG;
2020 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2021 *cycle |= VME_DATA;
2022
2023 mutex_unlock(&lm->mtx);
2024
2025 return enabled;
2026 }
2027
2028 /*
2029 * Attach a callback to a specific location monitor.
2030 *
2031  * The callback will be passed the number of the monitor that triggered.
2032 */
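/*
 * For illustration, a slave driver would normally use the location monitor
 * through the VME core, roughly:
 *
 *	lm = vme_lm_request(vdev);
 *	vme_lm_set(lm, lm_base, VME_A16, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, monitor, callback);
 *
 * (a sketch only - see the VME core for the exact interface)
 */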
2033 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2034 void (*callback)(int))
2035 {
2036 u32 lm_ctl, tmp;
2037 struct vme_bridge *tsi148_bridge;
2038 struct tsi148_driver *bridge;
2039
2040 tsi148_bridge = lm->parent;
2041
2042 bridge = tsi148_bridge->driver_priv;
2043
2044 mutex_lock(&lm->mtx);
2045
2046 /* Ensure that the location monitor is configured - need PGM or DATA */
2047 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2048 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2049 mutex_unlock(&lm->mtx);
2050 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2051 "configured\n");
2052 return -EINVAL;
2053 }
2054
2055 /* Check that a callback isn't already attached */
2056 if (bridge->lm_callback[monitor] != NULL) {
2057 mutex_unlock(&lm->mtx);
2058 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2059 return -EBUSY;
2060 }
2061
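/*
 * Store the callback before enabling the interrupt so the LM interrupt
 * handler never finds an enabled monitor without a handler.
 */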
2062 /* Attach callback */
2063 bridge->lm_callback[monitor] = callback;
2064
2065 /* Enable Location Monitor interrupt */
2066 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2067 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2068 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2069
2070 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2071 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2072 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2073
2074 /* Ensure that the global Location Monitor Enable bit is set */
2075 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2076 lm_ctl |= TSI148_LCSR_LMAT_EN;
2077 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2078 }
2079
2080 mutex_unlock(&lm->mtx);
2081
2082 return 0;
2083 }
2084
2085 /*
2086  * Detach a callback function from a specific location monitor.
2087 */
2088 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2089 {
2090 u32 lm_en, tmp;
2091 struct tsi148_driver *bridge;
2092
2093 bridge = lm->parent->driver_priv;
2094
2095 mutex_lock(&lm->mtx);
2096
2097 /* Disable Location Monitor and ensure previous interrupts are clear */
2098 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2099 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2100 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2101
2102 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2103 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2104 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2105
2106 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2107 bridge->base + TSI148_LCSR_INTC);
2108
2109 /* Detach callback */
2110 bridge->lm_callback[monitor] = NULL;
2111
2112 /* If no location monitors remain enabled, disable the global enable */
2113 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2114 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2115 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2116 tmp &= ~TSI148_LCSR_LMAT_EN;
2117 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2118 }
2119
2120 mutex_unlock(&lm->mtx);
2121
2122 return 0;
2123 }
2124
2125 /*
2126 * Determine Geographical Addressing
2127 */
2128 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2129 {
2130 u32 slot = 0;
2131 struct tsi148_driver *bridge;
2132
2133 bridge = tsi148_bridge->driver_priv;
2134
2135 if (!geoid) {
2136 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2137 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2138 } else
2139 slot = geoid;
2140
2141 return (int)slot;
2142 }
2143
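/*
 * Coherent buffer helpers hooked up below as the bridge's alloc_consistent/
 * free_consistent operations; the VME core uses them when slave drivers
 * request coherent (DMA-able) buffers, e.g. for slave window backing.
 */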
2144 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2145 dma_addr_t *dma)
2146 {
2147 struct pci_dev *pdev;
2148
2149 /* Find pci_dev container of dev */
2150 pdev = container_of(parent, struct pci_dev, dev);
2151
2152 return pci_alloc_consistent(pdev, size, dma);
2153 }
2154
2155 static void tsi148_free_consistent(struct device *parent, size_t size,
2156 void *vaddr, dma_addr_t dma)
2157 {
2158 struct pci_dev *pdev;
2159
2160 /* Find pci_dev container of dev */
2161 pdev = container_of(parent, struct pci_dev, dev);
2162
2163 pci_free_consistent(pdev, size, vaddr, dma);
2164 }
2165
2166 static int __init tsi148_init(void)
2167 {
2168 return pci_register_driver(&tsi148_driver);
2169 }
2170
2171 /*
2172 * Configure CR/CSR space
2173 *
2174 * Access to the CR/CSR can be configured at power-up. The location of the
2175  * CR/CSR registers in the CR/CSR address space is determined by the board's
2176  * Auto-ID or Geographic address. This function ensures that the window is
2177  * enabled at an offset consistent with the board's geographic address.
2178  *
2179  * Each board has a 512kB window, with the highest 4kB being used for the
2180  * board's registers, so there is a fixed-length 508kB window that must be
2181  * mapped onto PCI memory.
2182 */
2183 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2184 struct pci_dev *pdev)
2185 {
2186 u32 cbar, crat, vstat;
2187 u32 crcsr_bus_high, crcsr_bus_low;
2188 int retval;
2189 struct tsi148_driver *bridge;
2190
2191 bridge = tsi148_bridge->driver_priv;
2192
2193 /* Allocate mem for CR/CSR image */
2194 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2195 &bridge->crcsr_bus);
2196 if (bridge->crcsr_kernel == NULL) {
2197 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2198 "CR/CSR image\n");
2199 return -ENOMEM;
2200 }
2201
2202 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2203
2204 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2205
2206 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2207 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2208
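/*
 * CBAR holds the CR/CSR base (the slot number) shifted left by three bits;
 * compare it with the detected or overridden slot and correct it if the two
 * disagree.
 */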
2209 /* Ensure that the CR/CSR is configured at the correct offset */
2210 cbar = ioread32be(bridge->base + TSI148_CBAR);
2211 cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2212
2213 vstat = tsi148_slot_get(tsi148_bridge);
2214
2215 if (cbar != vstat) {
2216 cbar = vstat;
2217 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2218 iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2219 }
2220 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2221
2222 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2223 if (!(crat & TSI148_LCSR_CRAT_EN)) {
2224 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2225 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2226 bridge->base + TSI148_LCSR_CRAT);
2227 } else
2228 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2229
2230 /* If we want flushed, error-checked writes, set up a window
2231 * over the CR/CSR registers. We read from here to safely flush
2232 * through VME writes.
2233 */
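/*
 * Each slot owns a 512kB (0x80000 byte) CR/CSR window, so this board's own
 * registers are reachable at vstat * 0x80000 within CR/CSR space.
 */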
2234 if (err_chk) {
2235 retval = tsi148_master_set(bridge->flush_image, 1,
2236 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2237 VME_D16);
2238 if (retval)
2239 dev_err(tsi148_bridge->parent, "Configuring flush image"
2240 " failed\n");
2241 }
2242
2243 return 0;
2245 }
2246
2247 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2248 struct pci_dev *pdev)
2249 {
2250 u32 crat;
2251 struct tsi148_driver *bridge;
2252
2253 bridge = tsi148_bridge->driver_priv;
2254
2255 /* Turn off CR/CSR space */
2256 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2257 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2258 bridge->base + TSI148_LCSR_CRAT);
2259
2260 /* Free image */
2261 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2262 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2263
2264 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2265 bridge->crcsr_bus);
2266 }
2267
2268 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2269 {
2270 int retval, i, master_num;
2271 u32 data;
2272 struct list_head *pos = NULL;
2273 struct vme_bridge *tsi148_bridge;
2274 struct tsi148_driver *tsi148_device;
2275 struct vme_master_resource *master_image;
2276 struct vme_slave_resource *slave_image;
2277 struct vme_dma_resource *dma_ctrlr;
2278 struct vme_lm_resource *lm;
2279
2280 /* If we want to support more than one of each bridge, we need to
2281 * dynamically generate this so we get one per device
2282 */
2283 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2284 if (tsi148_bridge == NULL) {
2285 dev_err(&pdev->dev, "Failed to allocate memory for device "
2286 "structure\n");
2287 retval = -ENOMEM;
2288 goto err_struct;
2289 }
2290
2291 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2292 if (tsi148_device == NULL) {
2293 dev_err(&pdev->dev, "Failed to allocate memory for device "
2294 "structure\n");
2295 retval = -ENOMEM;
2296 goto err_driver;
2297 }
2298
2299 tsi148_bridge->driver_priv = tsi148_device;
2300
2301 /* Enable the device */
2302 retval = pci_enable_device(pdev);
2303 if (retval) {
2304 dev_err(&pdev->dev, "Unable to enable device\n");
2305 goto err_enable;
2306 }
2307
2308 /* Map Registers */
2309 retval = pci_request_regions(pdev, driver_name);
2310 if (retval) {
2311 dev_err(&pdev->dev, "Unable to reserve resources\n");
2312 goto err_resource;
2313 }
2314
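/*
 * The TSI148 register set (the Combined Register Group) occupies the first
 * 4kB of BAR 0.
 */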
2315 /* map registers in BAR 0 */
2316 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2317 4096);
2318 if (!tsi148_device->base) {
2319 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2320 retval = -EIO;
2321 goto err_remap;
2322 }
2323
2324 /* Check to see if the mapping worked out */
2325 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2326 if (data != PCI_VENDOR_ID_TUNDRA) {
2327 dev_err(&pdev->dev, "CRG region check failed\n");
2328 retval = -EIO;
2329 goto err_test;
2330 }
2331
2332 /* Initialize wait queues & mutual exclusion flags */
2333 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2334 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2335 init_waitqueue_head(&tsi148_device->iack_queue);
2336 mutex_init(&tsi148_device->vme_int);
2337 mutex_init(&tsi148_device->vme_rmw);
2338
2339 tsi148_bridge->parent = &pdev->dev;
2340 strcpy(tsi148_bridge->name, driver_name);
2341
2342 /* Setup IRQ */
2343 retval = tsi148_irq_init(tsi148_bridge);
2344 if (retval != 0) {
2345 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2346 goto err_irq;
2347 }
2348
2349 /* If we are going to flush writes, we need to read from the VME bus.
2350 * We need to do this safely, thus we read the devices own CR/CSR
2351 * register. To do this we must set up a window in CR/CSR space and
2352 * hence have one less master window resource available.
2353 */
2354 master_num = TSI148_MAX_MASTER;
2355 if (err_chk) {
2356 master_num--;
2357
2358 tsi148_device->flush_image =
2359 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2360 if (tsi148_device->flush_image == NULL) {
2361 dev_err(&pdev->dev, "Failed to allocate memory for "
2362 "flush resource structure\n");
2363 retval = -ENOMEM;
2364 goto err_master;
2365 }
2366 tsi148_device->flush_image->parent = tsi148_bridge;
2367 spin_lock_init(&tsi148_device->flush_image->lock);
2368 tsi148_device->flush_image->locked = 1;
2369 tsi148_device->flush_image->number = master_num;
2370 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2371 VME_A32 | VME_A64;
2372 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2373 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2374 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2375 VME_USER | VME_PROG | VME_DATA;
2376 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2377 memset(&tsi148_device->flush_image->bus_resource, 0,
2378 sizeof(struct resource));
2379 tsi148_device->flush_image->kern_base = NULL;
2380 }
2381
2382 /* Add master windows to list */
2383 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2384 for (i = 0; i < master_num; i++) {
2385 master_image = kmalloc(sizeof(struct vme_master_resource),
2386 GFP_KERNEL);
2387 if (master_image == NULL) {
2388 dev_err(&pdev->dev, "Failed to allocate memory for "
2389 "master resource structure\n");
2390 retval = -ENOMEM;
2391 goto err_master;
2392 }
2393 master_image->parent = tsi148_bridge;
2394 spin_lock_init(&master_image->lock);
2395 master_image->locked = 0;
2396 master_image->number = i;
2397 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2398 VME_A64;
2399 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2400 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2401 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2402 VME_PROG | VME_DATA;
2403 master_image->width_attr = VME_D16 | VME_D32;
2404 memset(&master_image->bus_resource, 0,
2405 sizeof(struct resource));
2406 master_image->kern_base = NULL;
2407 list_add_tail(&master_image->list,
2408 &tsi148_bridge->master_resources);
2409 }
2410
2411 /* Add slave windows to list */
2412 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2413 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2414 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2415 GFP_KERNEL);
2416 if (slave_image == NULL) {
2417 dev_err(&pdev->dev, "Failed to allocate memory for "
2418 "slave resource structure\n");
2419 retval = -ENOMEM;
2420 goto err_slave;
2421 }
2422 slave_image->parent = tsi148_bridge;
2423 mutex_init(&slave_image->mtx);
2424 slave_image->locked = 0;
2425 slave_image->number = i;
2426 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2427 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2428 VME_USER3 | VME_USER4;
2429 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2430 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2431 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2432 VME_PROG | VME_DATA;
2433 list_add_tail(&slave_image->list,
2434 &tsi148_bridge->slave_resources);
2435 }
2436
2437 /* Add dma engines to list */
2438 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2439 for (i = 0; i < TSI148_MAX_DMA; i++) {
2440 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2441 GFP_KERNEL);
2442 if (dma_ctrlr == NULL) {
2443 dev_err(&pdev->dev, "Failed to allocate memory for "
2444 "dma resource structure\n");
2445 retval = -ENOMEM;
2446 goto err_dma;
2447 }
2448 dma_ctrlr->parent = tsi148_bridge;
2449 mutex_init(&dma_ctrlr->mtx);
2450 dma_ctrlr->locked = 0;
2451 dma_ctrlr->number = i;
2452 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2453 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2454 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2455 VME_DMA_PATTERN_TO_MEM;
2456 INIT_LIST_HEAD(&dma_ctrlr->pending);
2457 INIT_LIST_HEAD(&dma_ctrlr->running);
2458 list_add_tail(&dma_ctrlr->list,
2459 &tsi148_bridge->dma_resources);
2460 }
2461
2462 /* Add location monitor to list */
2463 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2464 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2465 if (lm == NULL) {
2466 dev_err(&pdev->dev, "Failed to allocate memory for "
2467 "location monitor resource structure\n");
2468 retval = -ENOMEM;
2469 goto err_lm;
2470 }
2471 lm->parent = tsi148_bridge;
2472 mutex_init(&lm->mtx);
2473 lm->locked = 0;
2474 lm->number = 1;
2475 lm->monitors = 4;
2476 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2477
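/*
 * Hook the tsi148 implementations into the generic bridge operations through
 * which the VME core dispatches slave driver requests.
 */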
2478 tsi148_bridge->slave_get = tsi148_slave_get;
2479 tsi148_bridge->slave_set = tsi148_slave_set;
2480 tsi148_bridge->master_get = tsi148_master_get;
2481 tsi148_bridge->master_set = tsi148_master_set;
2482 tsi148_bridge->master_read = tsi148_master_read;
2483 tsi148_bridge->master_write = tsi148_master_write;
2484 tsi148_bridge->master_rmw = tsi148_master_rmw;
2485 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2486 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2487 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2488 tsi148_bridge->irq_set = tsi148_irq_set;
2489 tsi148_bridge->irq_generate = tsi148_irq_generate;
2490 tsi148_bridge->lm_set = tsi148_lm_set;
2491 tsi148_bridge->lm_get = tsi148_lm_get;
2492 tsi148_bridge->lm_attach = tsi148_lm_attach;
2493 tsi148_bridge->lm_detach = tsi148_lm_detach;
2494 tsi148_bridge->slot_get = tsi148_slot_get;
2495 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2496 tsi148_bridge->free_consistent = tsi148_free_consistent;
2497
2498 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2499 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2500 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2501 if (!geoid)
2502 dev_info(&pdev->dev, "VME geographical address is %d\n",
2503 data & TSI148_LCSR_VSTAT_GA_M);
2504 else
2505 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2506 geoid);
2507
2508 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2509 err_chk ? "enabled" : "disabled");
2510
2511 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2512 if (retval) {
2513 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2514 goto err_crcsr;
2515 }
2515
2516 retval = vme_register_bridge(tsi148_bridge);
2517 if (retval != 0) {
2518 dev_err(&pdev->dev, "Chip Registration failed.\n");
2519 goto err_reg;
2520 }
2521
2522 pci_set_drvdata(pdev, tsi148_bridge);
2523
2524 /* Clear VME bus "board fail", and "power-up reset" lines */
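/*
 * BRDFL is cleared by writing it back as zero, while the power-up reset
 * status is cleared by writing a one to CPURST.
 */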
2525 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2526 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2527 data |= TSI148_LCSR_VSTAT_CPURST;
2528 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2529
2530 return 0;
2531
2532 err_reg:
2533 tsi148_crcsr_exit(tsi148_bridge, pdev);
2534 err_crcsr:
2535 err_lm:
2536 /* resources are stored in link list */
2537 list_for_each(pos, &tsi148_bridge->lm_resources) {
2538 lm = list_entry(pos, struct vme_lm_resource, list);
2539 list_del(pos);
2540 kfree(lm);
2541 }
2542 err_dma:
2543 /* resources are stored in link list */
2544 list_for_each(pos, &tsi148_bridge->dma_resources) {
2545 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2546 list_del(pos);
2547 kfree(dma_ctrlr);
2548 }
2549 err_slave:
2550 /* resources are stored in link list */
2551 list_for_each(pos, &tsi148_bridge->slave_resources) {
2552 slave_image = list_entry(pos, struct vme_slave_resource, list);
2553 list_del(pos);
2554 kfree(slave_image);
2555 }
2556 err_master:
2557 /* resources are stored in link list */
2558 list_for_each(pos, &tsi148_bridge->master_resources) {
2559 master_image = list_entry(pos, struct vme_master_resource,
2560 list);
2561 list_del(pos);
2562 kfree(master_image);
2563 }
2564
2565 tsi148_irq_exit(tsi148_bridge, pdev);
2566 err_irq:
2567 err_test:
2568 iounmap(tsi148_device->base);
2569 err_remap:
2570 pci_release_regions(pdev);
2571 err_resource:
2572 pci_disable_device(pdev);
2573 err_enable:
2574 kfree(tsi148_device);
2575 err_driver:
2576 kfree(tsi148_bridge);
2577 err_struct:
2578 return retval;
2579
2580 }
2581
2582 static void tsi148_remove(struct pci_dev *pdev)
2583 {
2584 struct list_head *pos = NULL;
2585 struct list_head *tmplist;
2586 struct vme_master_resource *master_image;
2587 struct vme_slave_resource *slave_image;
2588 struct vme_dma_resource *dma_ctrlr;
2589 int i;
2590 struct tsi148_driver *bridge;
2591 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2592
2593 bridge = tsi148_bridge->driver_priv;
2594 
2596 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2597
2598 /*
2599 * Shutdown all inbound and outbound windows.
2600 */
2601 for (i = 0; i < 8; i++) {
2602 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2603 TSI148_LCSR_OFFSET_ITAT);
2604 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2605 TSI148_LCSR_OFFSET_OTAT);
2606 }
2607
2608 /*
2609 * Shutdown Location monitor.
2610 */
2611 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2612
2613 /*
2614 * Shutdown CRG map.
2615 */
2616 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2617
2618 /*
2619 * Clear error status.
2620 */
2621 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2622 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2623 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2624
2625 /*
2626 * Remove VIRQ interrupt (if any)
2627 */
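/*
 * 0x800 flags a VME interrupt still being generated by this bridge; writing
 * 0x8000 withdraws it (raw values rather than header defines).
 */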
2628 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2629 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2630
2631 /*
2632 * Map all Interrupts to PCI INTA
2633 */
2634 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2635 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2636
2637 tsi148_irq_exit(tsi148_bridge, pdev);
2638
2639 vme_unregister_bridge(tsi148_bridge);
2640
2641 tsi148_crcsr_exit(tsi148_bridge, pdev);
2642
2643 /* resources are stored in link list */
2644 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2645 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2646 list_del(pos);
2647 kfree(dma_ctrlr);
2648 }
2649
2650 /* resources are stored in link list */
2651 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2652 slave_image = list_entry(pos, struct vme_slave_resource, list);
2653 list_del(pos);
2654 kfree(slave_image);
2655 }
2656
2657 /* resources are stored in link list */
2658 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2659 master_image = list_entry(pos, struct vme_master_resource,
2660 list);
2661 list_del(pos);
2662 kfree(master_image);
2663 }
2664
2665 iounmap(bridge->base);
2666
2667 pci_release_regions(pdev);
2668
2669 pci_disable_device(pdev);
2670
2671 kfree(tsi148_bridge->driver_priv);
2672
2673 kfree(tsi148_bridge);
2674 }
2675
2676 static void __exit tsi148_exit(void)
2677 {
2678 pci_unregister_driver(&tsi148_driver);
2679 }
2680
2681 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2682 module_param(err_chk, bool, 0);
2683
2684 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2685 module_param(geoid, int, 0);
2686
2687 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2688 MODULE_LICENSE("GPL");
2689
2690 module_init(tsi148_init);
2691 module_exit(tsi148_exit);