VME: Stop using memcpy_[to|from]io() due to unwanted behaviour
drivers/vme/bridges/vme_tsi148.c
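This revision drops memcpy_toio()/memcpy_fromio() from the master window read and write paths, because those helpers may split a transfer into 8-bit accesses and so break D16/D32 cycles on the VME bus. A minimal sketch of the replacement idea follows; the function name is illustrative only, it assumes a 32-bit-aligned start address, and it omits the head/tail alignment handling that tsi148_master_read()/tsi148_master_write() perform further down in this file.

/*
 * Illustrative sketch only (not part of the driver): copy 'count' bytes
 * from an IO window using the widest accesses possible so that D16/D32
 * cycles are preserved.  Assumes 'addr' is 32-bit aligned.
 */
static void example_width_preserving_read(void __iomem *addr, void *buf,
	size_t count)
{
	size_t done = 0;

	/* Use 32-bit reads for the bulk of the transfer */
	while ((count - done) >= 4) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}
	/* Then a single 16-bit read if two or more bytes remain */
	if ((count - done) >= 2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	/* And a final 8-bit read for any trailing byte */
	if (count - done)
		*(u8 *)(buf + done) = ioread8(addr + done);
}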
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40
41
42 /* Module parameter */
43 static bool err_chk;
44 static int geoid;
45
46 static const char driver_name[] = "vme_tsi148";
47
48 static const struct pci_device_id tsi148_ids[] = {
49 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
50 { },
51 };
52
53 static struct pci_driver tsi148_driver = {
54 .name = driver_name,
55 .id_table = tsi148_ids,
56 .probe = tsi148_probe,
57 .remove = tsi148_remove,
58 };
59
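/*
 * Helpers for converting between the 64-bit addresses/offsets used in the
 * driver and the pairs of 32-bit registers the TSI148 exposes.  For example,
 * reg_join(0x00000001, 0x80000000, &v) yields v = 0x0000000180000000 and
 * reg_split(v, &hi, &lo) reverses the operation.
 */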
60 static void reg_join(unsigned int high, unsigned int low,
61 unsigned long long *variable)
62 {
63 *variable = (unsigned long long)high << 32;
64 *variable |= (unsigned long long)low;
65 }
66
67 static void reg_split(unsigned long long variable, unsigned int *high,
68 unsigned int *low)
69 {
70 *low = (unsigned int)variable & 0xFFFFFFFF;
71 *high = (unsigned int)(variable >> 32);
72 }
73
74 /*
75 * Wakes up DMA queue.
76 */
77 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
78 int channel_mask)
79 {
80 u32 serviced = 0;
81
82 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
83 wake_up(&bridge->dma_queue[0]);
84 serviced |= TSI148_LCSR_INTC_DMA0C;
85 }
86 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
87 wake_up(&bridge->dma_queue[1]);
88 serviced |= TSI148_LCSR_INTC_DMA1C;
89 }
90
91 return serviced;
92 }
93
94 /*
95 * Wake up location monitor queue
96 */
97 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
98 {
99 int i;
100 u32 serviced = 0;
101
102 for (i = 0; i < 4; i++) {
103 if (stat & TSI148_LCSR_INTS_LMS[i]) {
104 /* We only enable interrupts if the callback is set */
105 bridge->lm_callback[i](i);
106 serviced |= TSI148_LCSR_INTC_LMC[i];
107 }
108 }
109
110 return serviced;
111 }
112
113 /*
114 * Wake up mail box queue.
115 *
116 * XXX This functionality is not exposed up through the API.
117 */
118 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
119 {
120 int i;
121 u32 val;
122 u32 serviced = 0;
123 struct tsi148_driver *bridge;
124
125 bridge = tsi148_bridge->driver_priv;
126
127 for (i = 0; i < 4; i++) {
128 if (stat & TSI148_LCSR_INTS_MBS[i]) {
129 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
130 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
131 ": 0x%x\n", i, val);
132 serviced |= TSI148_LCSR_INTC_MBC[i];
133 }
134 }
135
136 return serviced;
137 }
138
139 /*
140 * Display error & status message when PERR (PCI) exception interrupt occurs.
141 */
142 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
143 {
144 struct tsi148_driver *bridge;
145
146 bridge = tsi148_bridge->driver_priv;
147
148 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
149 "attributes: %08x\n",
150 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
151 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
152 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
153
154 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
155 "completion reg: %08x\n",
156 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
157 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
158
159 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
160
161 return TSI148_LCSR_INTC_PERRC;
162 }
163
164 /*
165 * Save address and status when VME error interrupt occurs.
166 */
167 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
168 {
169 unsigned int error_addr_high, error_addr_low;
170 unsigned long long error_addr;
171 u32 error_attrib;
172 struct vme_bus_error *error = NULL;
173 struct tsi148_driver *bridge;
174
175 bridge = tsi148_bridge->driver_priv;
176
177 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
178 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
179 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
180
181 reg_join(error_addr_high, error_addr_low, &error_addr);
182
183 /* Check for exception register overflow (we have lost error data) */
184 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
185 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
186 "Occurred\n");
187 }
188
189 if (err_chk) {
190 error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
191 if (error) {
192 error->address = error_addr;
193 error->attributes = error_attrib;
194 list_add_tail(&error->list, &tsi148_bridge->vme_errors);
195 } else {
196 dev_err(tsi148_bridge->parent,
197 "Unable to alloc memory for VMEbus Error reporting\n");
198 }
199 }
200
201 if (!error) {
202 dev_err(tsi148_bridge->parent,
203 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
204 error_addr, error_attrib);
205 }
206
207 /* Clear Status */
208 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
209
210 return TSI148_LCSR_INTC_VERRC;
211 }
212
213 /*
214 * Wake up IACK queue.
215 */
216 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
217 {
218 wake_up(&bridge->iack_queue);
219
220 return TSI148_LCSR_INTC_IACKC;
221 }
222
223 /*
224 * Call the VME bus interrupt callback if one is provided.
225 */
226 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
227 u32 stat)
228 {
229 int vec, i, serviced = 0;
230 struct tsi148_driver *bridge;
231
232 bridge = tsi148_bridge->driver_priv;
233
234 for (i = 7; i > 0; i--) {
235 if (stat & (1 << i)) {
236 /*
237 * Note: Even though the registers are defined as
238 * 32-bits in the spec, we only want to issue 8-bit
239 * IACK cycles on the bus, read from offset 3.
240 */
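/*
 * Reading a single byte at offset 3 (the least significant byte of the
 * big-endian 32-bit VIACK register) keeps the access, and therefore the
 * IACK cycle on the VME bus, 8 bits wide.
 */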
241 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
242
243 vme_irq_handler(tsi148_bridge, i, vec);
244
245 serviced |= (1 << i);
246 }
247 }
248
249 return serviced;
250 }
251
252 /*
253 * Top level interrupt handler. Clears appropriate interrupt status bits and
254 * then calls appropriate sub handler(s).
255 */
256 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
257 {
258 u32 stat, enable, serviced = 0;
259 struct vme_bridge *tsi148_bridge;
260 struct tsi148_driver *bridge;
261
262 tsi148_bridge = ptr;
263
264 bridge = tsi148_bridge->driver_priv;
265
266 /* Determine which interrupts are unmasked and set */
267 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
268 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
269
270 /* Only look at unmasked interrupts */
271 stat &= enable;
272
273 if (unlikely(!stat))
274 return IRQ_NONE;
275
276 /* Call subhandlers as appropriate */
277 /* DMA irqs */
278 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
279 serviced |= tsi148_DMA_irqhandler(bridge, stat);
280
281 /* Location monitor irqs */
282 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
283 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
284 serviced |= tsi148_LM_irqhandler(bridge, stat);
285
286 /* Mail box irqs */
287 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
288 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
289 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
290
291 /* PCI bus error */
292 if (stat & TSI148_LCSR_INTS_PERRS)
293 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
294
295 /* VME bus error */
296 if (stat & TSI148_LCSR_INTS_VERRS)
297 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
298
299 /* IACK irq */
300 if (stat & TSI148_LCSR_INTS_IACKS)
301 serviced |= tsi148_IACK_irqhandler(bridge);
302
303 /* VME bus irqs */
304 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
305 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
306 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
307 TSI148_LCSR_INTS_IRQ1S))
308 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
309
310 /* Clear serviced interrupts */
311 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
312
313 return IRQ_HANDLED;
314 }
315
316 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
317 {
318 int result;
319 unsigned int tmp;
320 struct pci_dev *pdev;
321 struct tsi148_driver *bridge;
322
323 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
324
325 bridge = tsi148_bridge->driver_priv;
326
327 /* Initialise list for VME bus errors */
328 INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
329
330 mutex_init(&tsi148_bridge->irq_mtx);
331
332 result = request_irq(pdev->irq,
333 tsi148_irqhandler,
334 IRQF_SHARED,
335 driver_name, tsi148_bridge);
336 if (result) {
337 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
338 "vector %02X\n", pdev->irq);
339 return result;
340 }
341
342 /* Enable and unmask interrupts */
343 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
344 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
345 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
346 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
347 TSI148_LCSR_INTEO_IACKEO;
348
349 /* This leaves the following interrupts masked.
350 * TSI148_LCSR_INTEO_VIEEO
351 * TSI148_LCSR_INTEO_SYSFLEO
352 * TSI148_LCSR_INTEO_ACFLEO
353 */
354
355 /* Don't enable Location Monitor interrupts here - they will be
356 * enabled when the location monitors are properly configured and
357 * a callback has been attached.
358 * TSI148_LCSR_INTEO_LM0EO
359 * TSI148_LCSR_INTEO_LM1EO
360 * TSI148_LCSR_INTEO_LM2EO
361 * TSI148_LCSR_INTEO_LM3EO
362 */
363
364 /* Don't enable VME interrupts until we add a handler, else the board
365 * will respond to the interrupt and we don't want that unless we know
366 * how to properly deal with it.
367 * TSI148_LCSR_INTEO_IRQ7EO
368 * TSI148_LCSR_INTEO_IRQ6EO
369 * TSI148_LCSR_INTEO_IRQ5EO
370 * TSI148_LCSR_INTEO_IRQ4EO
371 * TSI148_LCSR_INTEO_IRQ3EO
372 * TSI148_LCSR_INTEO_IRQ2EO
373 * TSI148_LCSR_INTEO_IRQ1EO
374 */
375
376 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
377 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
378
379 return 0;
380 }
381
382 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
383 struct pci_dev *pdev)
384 {
385 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
386
387 /* Turn off interrupts */
388 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
389 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
390
391 /* Clear all interrupts */
392 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
393
394 /* Detach interrupt handler */
395 free_irq(pdev->irq, tsi148_bridge);
396 }
397
398 /*
399 * Check to see if an IACK has been received, returns true (1) or false (0).
400 */
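/*
 * Used as the wake-up condition in tsi148_irq_generate() below: after an
 * interrupt is asserted via the VICR register we wait until the VICR IRQS
 * bit clears, which indicates the interrupt has been acknowledged.
 */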
401 static int tsi148_iack_received(struct tsi148_driver *bridge)
402 {
403 u32 tmp;
404
405 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
406
407 if (tmp & TSI148_LCSR_VICR_IRQS)
408 return 0;
409 else
410 return 1;
411 }
412
413 /*
414 * Configure VME interrupt
415 */
416 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
417 int state, int sync)
418 {
419 struct pci_dev *pdev;
420 u32 tmp;
421 struct tsi148_driver *bridge;
422
423 bridge = tsi148_bridge->driver_priv;
424
425 /* We need to do the ordering differently for enabling and disabling */
426 if (state == 0) {
427 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
428 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
429 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
430
431 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
432 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
433 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
434
435 if (sync != 0) {
436 pdev = container_of(tsi148_bridge->parent,
437 struct pci_dev, dev);
438
439 synchronize_irq(pdev->irq);
440 }
441 } else {
442 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
443 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
444 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
445
446 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
447 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
448 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
449 }
450 }
451
452 /*
453 * Generate a VME bus interrupt at the requested level & vector. Wait for
454 * interrupt to be acked.
455 */
456 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
457 int statid)
458 {
459 u32 tmp;
460 struct tsi148_driver *bridge;
461
462 bridge = tsi148_bridge->driver_priv;
463
464 mutex_lock(&bridge->vme_int);
465
466 /* Read VICR register */
467 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
468
469 /* Set Status/ID */
470 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
471 (statid & TSI148_LCSR_VICR_STID_M);
472 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
473
474 /* Assert VMEbus IRQ */
475 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
476 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
477
478 /* XXX Consider implementing a timeout? */
479 wait_event_interruptible(bridge->iack_queue,
480 tsi148_iack_received(bridge));
481
482 mutex_unlock(&bridge->vme_int);
483
484 return 0;
485 }
486
487 /*
488 * Find the first error in this address range
489 */
490 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
491 u32 aspace, unsigned long long address, size_t count)
492 {
493 struct list_head *err_pos;
494 struct vme_bus_error *vme_err, *valid = NULL;
495 unsigned long long bound;
496
497 bound = address + count;
498
499 /*
500 * XXX We are currently not looking at the address space when parsing
501 * for errors. This is because parsing the Address Modifier Codes
502 * is going to be quite resource intensive to do properly. We
503 * should be OK just looking at the addresses and this is certainly
504 * much better than what we had before.
505 */
506 err_pos = NULL;
507 /* Iterate through errors */
508 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
509 vme_err = list_entry(err_pos, struct vme_bus_error, list);
510 if ((vme_err->address >= address) &&
511 (vme_err->address < bound)) {
512
513 valid = vme_err;
514 break;
515 }
516 }
517
518 return valid;
519 }
520
521 /*
522 * Clear errors in the provided address range.
523 */
524 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
525 u32 aspace, unsigned long long address, size_t count)
526 {
527 struct list_head *err_pos, *temp;
528 struct vme_bus_error *vme_err;
529 unsigned long long bound;
530
531 bound = address + count;
532
533 /*
534 * XXX We are currently not looking at the address space when parsing
535 * for errors. This is because parsing the Address Modifier Codes
536 * is going to be quite resource intensive to do properly. We
537 * should be OK just looking at the addresses and this is certainly
538 * much better than what we had before.
539 */
540 err_pos = NULL;
541 /* Iterate through errors */
542 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
543 vme_err = list_entry(err_pos, struct vme_bus_error, list);
544
545 if ((vme_err->address >= address) &&
546 (vme_err->address < bound)) {
547
548 list_del(err_pos);
549 kfree(vme_err);
550 }
551 }
552 }
553
554 /*
555 * Initialize a slave window with the requested attributes.
556 */
557 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
558 unsigned long long vme_base, unsigned long long size,
559 dma_addr_t pci_base, u32 aspace, u32 cycle)
560 {
561 unsigned int i, addr = 0, granularity = 0;
562 unsigned int temp_ctl = 0;
563 unsigned int vme_base_low, vme_base_high;
564 unsigned int vme_bound_low, vme_bound_high;
565 unsigned int pci_offset_low, pci_offset_high;
566 unsigned long long vme_bound, pci_offset;
567 struct vme_bridge *tsi148_bridge;
568 struct tsi148_driver *bridge;
569
570 tsi148_bridge = image->parent;
571 bridge = tsi148_bridge->driver_priv;
572
573 i = image->number;
574
575 switch (aspace) {
576 case VME_A16:
577 granularity = 0x10;
578 addr |= TSI148_LCSR_ITAT_AS_A16;
579 break;
580 case VME_A24:
581 granularity = 0x1000;
582 addr |= TSI148_LCSR_ITAT_AS_A24;
583 break;
584 case VME_A32:
585 granularity = 0x10000;
586 addr |= TSI148_LCSR_ITAT_AS_A32;
587 break;
588 case VME_A64:
589 granularity = 0x10000;
590 addr |= TSI148_LCSR_ITAT_AS_A64;
591 break;
592 case VME_CRCSR:
593 case VME_USER1:
594 case VME_USER2:
595 case VME_USER3:
596 case VME_USER4:
597 default:
598 dev_err(tsi148_bridge->parent, "Invalid address space\n");
599 return -EINVAL;
600 break;
601 }
602
603 /* Convert 64-bit variables to 2x 32-bit variables */
604 reg_split(vme_base, &vme_base_high, &vme_base_low);
605
606 /*
607 * Bound address is a valid address for the window, adjust
608 * accordingly
609 */
610 vme_bound = vme_base + size - granularity;
611 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
612 pci_offset = (unsigned long long)pci_base - vme_base;
613 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
614
615 if (vme_base_low & (granularity - 1)) {
616 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
617 return -EINVAL;
618 }
619 if (vme_bound_low & (granularity - 1)) {
620 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
621 return -EINVAL;
622 }
623 if (pci_offset_low & (granularity - 1)) {
624 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
625 "alignment\n");
626 return -EINVAL;
627 }
628
629 /* Disable while we are mucking around */
630 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 TSI148_LCSR_OFFSET_ITAT);
632 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
633 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITAT);
635
636 /* Setup mapping */
637 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITSAU);
639 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITSAL);
641 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITEAU);
643 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITEAL);
645 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
646 TSI148_LCSR_OFFSET_ITOFU);
647 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
648 TSI148_LCSR_OFFSET_ITOFL);
649
650 /* Setup 2eSST speeds */
651 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
652 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
653 case VME_2eSST160:
654 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
655 break;
656 case VME_2eSST267:
657 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
658 break;
659 case VME_2eSST320:
660 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
661 break;
662 }
663
664 /* Setup cycle types */
665 temp_ctl &= ~(0x1F << 7);
666 if (cycle & VME_BLT)
667 temp_ctl |= TSI148_LCSR_ITAT_BLT;
668 if (cycle & VME_MBLT)
669 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
670 if (cycle & VME_2eVME)
671 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
672 if (cycle & VME_2eSST)
673 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
674 if (cycle & VME_2eSSTB)
675 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
676
677 /* Setup address space */
678 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
679 temp_ctl |= addr;
680
681 temp_ctl &= ~0xF;
682 if (cycle & VME_SUPER)
683 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
684 if (cycle & VME_USER)
685 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
686 if (cycle & VME_PROG)
687 temp_ctl |= TSI148_LCSR_ITAT_PGM;
688 if (cycle & VME_DATA)
689 temp_ctl |= TSI148_LCSR_ITAT_DATA;
690
691 /* Write ctl reg without enable */
692 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
693 TSI148_LCSR_OFFSET_ITAT);
694
695 if (enabled)
696 temp_ctl |= TSI148_LCSR_ITAT_EN;
697
698 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
699 TSI148_LCSR_OFFSET_ITAT);
700
701 return 0;
702 }
703
704 /*
705 * Get slave window configuration.
706 */
707 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
708 unsigned long long *vme_base, unsigned long long *size,
709 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
710 {
711 unsigned int i, granularity = 0, ctl = 0;
712 unsigned int vme_base_low, vme_base_high;
713 unsigned int vme_bound_low, vme_bound_high;
714 unsigned int pci_offset_low, pci_offset_high;
715 unsigned long long vme_bound, pci_offset;
716 struct tsi148_driver *bridge;
717
718 bridge = image->parent->driver_priv;
719
720 i = image->number;
721
722 /* Read registers */
723 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724 TSI148_LCSR_OFFSET_ITAT);
725
726 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITSAU);
728 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITSAL);
730 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITEAU);
732 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITEAL);
734 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
735 TSI148_LCSR_OFFSET_ITOFU);
736 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
737 TSI148_LCSR_OFFSET_ITOFL);
738
739 /* Combine the 2x 32-bit register values into 64-bit variables */
740 reg_join(vme_base_high, vme_base_low, vme_base);
741 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
742 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
743
744 *pci_base = (dma_addr_t)*vme_base + pci_offset;
745
746 *enabled = 0;
747 *aspace = 0;
748 *cycle = 0;
749
750 if (ctl & TSI148_LCSR_ITAT_EN)
751 *enabled = 1;
752
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
754 granularity = 0x10;
755 *aspace |= VME_A16;
756 }
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
758 granularity = 0x1000;
759 *aspace |= VME_A24;
760 }
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
762 granularity = 0x10000;
763 *aspace |= VME_A32;
764 }
765 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
766 granularity = 0x10000;
767 *aspace |= VME_A64;
768 }
769
770 /* Need granularity before we set the size */
771 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
772
773
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
775 *cycle |= VME_2eSST160;
776 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
777 *cycle |= VME_2eSST267;
778 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
779 *cycle |= VME_2eSST320;
780
781 if (ctl & TSI148_LCSR_ITAT_BLT)
782 *cycle |= VME_BLT;
783 if (ctl & TSI148_LCSR_ITAT_MBLT)
784 *cycle |= VME_MBLT;
785 if (ctl & TSI148_LCSR_ITAT_2eVME)
786 *cycle |= VME_2eVME;
787 if (ctl & TSI148_LCSR_ITAT_2eSST)
788 *cycle |= VME_2eSST;
789 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
790 *cycle |= VME_2eSSTB;
791
792 if (ctl & TSI148_LCSR_ITAT_SUPR)
793 *cycle |= VME_SUPER;
794 if (ctl & TSI148_LCSR_ITAT_NPRIV)
795 *cycle |= VME_USER;
796 if (ctl & TSI148_LCSR_ITAT_PGM)
797 *cycle |= VME_PROG;
798 if (ctl & TSI148_LCSR_ITAT_DATA)
799 *cycle |= VME_DATA;
800
801 return 0;
802 }
803
804 /*
805 * Allocate and map PCI Resource
806 */
807 static int tsi148_alloc_resource(struct vme_master_resource *image,
808 unsigned long long size)
809 {
810 unsigned long long existing_size;
811 int retval = 0;
812 struct pci_dev *pdev;
813 struct vme_bridge *tsi148_bridge;
814
815 tsi148_bridge = image->parent;
816
817 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
818
819 existing_size = (unsigned long long)(image->bus_resource.end -
820 image->bus_resource.start);
821
822 /* If the existing size is OK, return */
823 if ((size != 0) && (existing_size == (size - 1)))
824 return 0;
825
826 if (existing_size != 0) {
827 iounmap(image->kern_base);
828 image->kern_base = NULL;
829 kfree(image->bus_resource.name);
830 release_resource(&image->bus_resource);
831 memset(&image->bus_resource, 0, sizeof(struct resource));
832 }
833
834 /* Exit here if size is zero */
835 if (size == 0)
836 return 0;
837
838 if (image->bus_resource.name == NULL) {
839 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
840 if (image->bus_resource.name == NULL) {
841 dev_err(tsi148_bridge->parent, "Unable to allocate "
842 "memory for resource name\n");
843 retval = -ENOMEM;
844 goto err_name;
845 }
846 }
847
848 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
849 image->number);
850
851 image->bus_resource.start = 0;
852 image->bus_resource.end = (unsigned long)size;
853 image->bus_resource.flags = IORESOURCE_MEM;
854
855 retval = pci_bus_alloc_resource(pdev->bus,
856 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
857 0, NULL, NULL);
858 if (retval) {
859 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
860 "resource for window %d size 0x%lx start 0x%lx\n",
861 image->number, (unsigned long)size,
862 (unsigned long)image->bus_resource.start);
863 goto err_resource;
864 }
865
866 image->kern_base = ioremap_nocache(
867 image->bus_resource.start, size);
868 if (image->kern_base == NULL) {
869 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
870 retval = -ENOMEM;
871 goto err_remap;
872 }
873
874 return 0;
875
876 err_remap:
877 release_resource(&image->bus_resource);
878 err_resource:
879 kfree(image->bus_resource.name);
880 memset(&image->bus_resource, 0, sizeof(struct resource));
881 err_name:
882 return retval;
883 }
884
885 /*
886 * Free and unmap PCI Resource
887 */
888 static void tsi148_free_resource(struct vme_master_resource *image)
889 {
890 iounmap(image->kern_base);
891 image->kern_base = NULL;
892 release_resource(&image->bus_resource);
893 kfree(image->bus_resource.name);
894 memset(&image->bus_resource, 0, sizeof(struct resource));
895 }
896
897 /*
898 * Set the attributes of an outbound window.
899 */
900 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
901 unsigned long long vme_base, unsigned long long size, u32 aspace,
902 u32 cycle, u32 dwidth)
903 {
904 int retval = 0;
905 unsigned int i;
906 unsigned int temp_ctl = 0;
907 unsigned int pci_base_low, pci_base_high;
908 unsigned int pci_bound_low, pci_bound_high;
909 unsigned int vme_offset_low, vme_offset_high;
910 unsigned long long pci_bound, vme_offset, pci_base;
911 struct vme_bridge *tsi148_bridge;
912 struct tsi148_driver *bridge;
913
914 tsi148_bridge = image->parent;
915
916 bridge = tsi148_bridge->driver_priv;
917
918 /* Verify input data */
919 if (vme_base & 0xFFFF) {
920 dev_err(tsi148_bridge->parent, "Invalid VME Window "
921 "alignment\n");
922 retval = -EINVAL;
923 goto err_window;
924 }
925
926 if ((size == 0) && (enabled != 0)) {
927 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
928 "enabled windows\n");
929 retval = -EINVAL;
930 goto err_window;
931 }
932
933 spin_lock(&image->lock);
934
935 /* Let's allocate the resource here rather than further up the stack as
936 * it avoids pushing loads of bus dependent stuff up the stack. If size
937 * is zero, any existing resource will be freed.
938 */
939 retval = tsi148_alloc_resource(image, size);
940 if (retval) {
941 spin_unlock(&image->lock);
942 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
943 "resource\n");
944 goto err_res;
945 }
946
947 if (size == 0) {
948 pci_base = 0;
949 pci_bound = 0;
950 vme_offset = 0;
951 } else {
952 pci_base = (unsigned long long)image->bus_resource.start;
953
954 /*
955 * Bound address is a valid address for the window, adjust
956 * according to window granularity.
957 */
958 pci_bound = pci_base + (size - 0x10000);
959 vme_offset = vme_base - pci_base;
960 }
961
962 /* Convert 64-bit variables to 2x 32-bit variables */
963 reg_split(pci_base, &pci_base_high, &pci_base_low);
964 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
965 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
966
967 if (pci_base_low & 0xFFFF) {
968 spin_unlock(&image->lock);
969 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
970 retval = -EINVAL;
971 goto err_gran;
972 }
973 if (pci_bound_low & 0xFFFF) {
974 spin_unlock(&image->lock);
975 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
976 retval = -EINVAL;
977 goto err_gran;
978 }
979 if (vme_offset_low & 0xFFFF) {
980 spin_unlock(&image->lock);
981 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
982 "alignment\n");
983 retval = -EINVAL;
984 goto err_gran;
985 }
986
987 i = image->number;
988
989 /* Disable while we are mucking around */
990 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
991 TSI148_LCSR_OFFSET_OTAT);
992 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
993 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
994 TSI148_LCSR_OFFSET_OTAT);
995
996 /* Setup 2eSST speeds */
997 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
998 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
999 case VME_2eSST160:
1000 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1001 break;
1002 case VME_2eSST267:
1003 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1004 break;
1005 case VME_2eSST320:
1006 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1007 break;
1008 }
1009
1010 /* Setup cycle types */
1011 if (cycle & VME_BLT) {
1012 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1013 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1014 }
1015 if (cycle & VME_MBLT) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1018 }
1019 if (cycle & VME_2eVME) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1022 }
1023 if (cycle & VME_2eSST) {
1024 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1025 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1026 }
1027 if (cycle & VME_2eSSTB) {
1028 dev_warn(tsi148_bridge->parent, "Currently not setting "
1029 "Broadcast Select Registers\n");
1030 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1031 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1032 }
1033
1034 /* Setup data width */
1035 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1036 switch (dwidth) {
1037 case VME_D16:
1038 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1039 break;
1040 case VME_D32:
1041 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1042 break;
1043 default:
1044 spin_unlock(&image->lock);
1045 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1046 retval = -EINVAL;
1047 goto err_dwidth;
1048 }
1049
1050 /* Setup address space */
1051 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1052 switch (aspace) {
1053 case VME_A16:
1054 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1055 break;
1056 case VME_A24:
1057 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1058 break;
1059 case VME_A32:
1060 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1061 break;
1062 case VME_A64:
1063 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1064 break;
1065 case VME_CRCSR:
1066 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1067 break;
1068 case VME_USER1:
1069 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1070 break;
1071 case VME_USER2:
1072 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1073 break;
1074 case VME_USER3:
1075 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1076 break;
1077 case VME_USER4:
1078 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1079 break;
1080 default:
1081 spin_unlock(&image->lock);
1082 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1083 retval = -EINVAL;
1084 goto err_aspace;
1085 break;
1086 }
1087
1088 temp_ctl &= ~(3<<4);
1089 if (cycle & VME_SUPER)
1090 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1091 if (cycle & VME_PROG)
1092 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1093
1094 /* Setup mapping */
1095 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1096 TSI148_LCSR_OFFSET_OTSAU);
1097 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1098 TSI148_LCSR_OFFSET_OTSAL);
1099 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTEAU);
1101 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1102 TSI148_LCSR_OFFSET_OTEAL);
1103 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1104 TSI148_LCSR_OFFSET_OTOFU);
1105 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1106 TSI148_LCSR_OFFSET_OTOFL);
1107
1108 /* Write ctl reg without enable */
1109 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1110 TSI148_LCSR_OFFSET_OTAT);
1111
1112 if (enabled)
1113 temp_ctl |= TSI148_LCSR_OTAT_EN;
1114
1115 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1116 TSI148_LCSR_OFFSET_OTAT);
1117
1118 spin_unlock(&image->lock);
1119 return 0;
1120
1121 err_aspace:
1122 err_dwidth:
1123 err_gran:
1124 tsi148_free_resource(image);
1125 err_res:
1126 err_window:
1127 return retval;
1128
1129 }
1130
1131 /*
1132 * Get the attributes of an outbound window.
1133 *
1134 * XXX Not parsing prefetch information.
1135 */
1136 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1137 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1138 u32 *cycle, u32 *dwidth)
1139 {
1140 unsigned int i, ctl;
1141 unsigned int pci_base_low, pci_base_high;
1142 unsigned int pci_bound_low, pci_bound_high;
1143 unsigned int vme_offset_low, vme_offset_high;
1144
1145 unsigned long long pci_base, pci_bound, vme_offset;
1146 struct tsi148_driver *bridge;
1147
1148 bridge = image->parent->driver_priv;
1149
1150 i = image->number;
1151
1152 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1153 TSI148_LCSR_OFFSET_OTAT);
1154
1155 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTSAU);
1157 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1158 TSI148_LCSR_OFFSET_OTSAL);
1159 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTEAU);
1161 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1162 TSI148_LCSR_OFFSET_OTEAL);
1163 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1164 TSI148_LCSR_OFFSET_OTOFU);
1165 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1166 TSI148_LCSR_OFFSET_OTOFL);
1167
1168 /* Combine the 2x 32-bit register values into 64-bit variables */
1169 reg_join(pci_base_high, pci_base_low, &pci_base);
1170 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1171 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1172
1173 *vme_base = pci_base + vme_offset;
1174 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1175
1176 *enabled = 0;
1177 *aspace = 0;
1178 *cycle = 0;
1179 *dwidth = 0;
1180
1181 if (ctl & TSI148_LCSR_OTAT_EN)
1182 *enabled = 1;
1183
1184 /* Setup address space */
1185 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1186 *aspace |= VME_A16;
1187 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1188 *aspace |= VME_A24;
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1190 *aspace |= VME_A32;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1192 *aspace |= VME_A64;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1194 *aspace |= VME_CRCSR;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1196 *aspace |= VME_USER1;
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1198 *aspace |= VME_USER2;
1199 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1200 *aspace |= VME_USER3;
1201 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1202 *aspace |= VME_USER4;
1203
1204 /* Setup 2eSST speeds */
1205 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1206 *cycle |= VME_2eSST160;
1207 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1208 *cycle |= VME_2eSST267;
1209 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1210 *cycle |= VME_2eSST320;
1211
1212 /* Setup cycle types */
1213 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1214 *cycle |= VME_SCT;
1215 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1216 *cycle |= VME_BLT;
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1218 *cycle |= VME_MBLT;
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1220 *cycle |= VME_2eVME;
1221 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1222 *cycle |= VME_2eSST;
1223 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1224 *cycle |= VME_2eSSTB;
1225
1226 if (ctl & TSI148_LCSR_OTAT_SUP)
1227 *cycle |= VME_SUPER;
1228 else
1229 *cycle |= VME_USER;
1230
1231 if (ctl & TSI148_LCSR_OTAT_PGM)
1232 *cycle |= VME_PROG;
1233 else
1234 *cycle |= VME_DATA;
1235
1236 /* Setup data width */
1237 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1238 *dwidth = VME_D16;
1239 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1240 *dwidth = VME_D32;
1241
1242 return 0;
1243 }
1244
1245
1246 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1247 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1248 u32 *cycle, u32 *dwidth)
1249 {
1250 int retval;
1251
1252 spin_lock(&image->lock);
1253
1254 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1255 cycle, dwidth);
1256
1257 spin_unlock(&image->lock);
1258
1259 return retval;
1260 }
1261
1262 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1263 size_t count, loff_t offset)
1264 {
1265 int retval, enabled;
1266 unsigned long long vme_base, size;
1267 u32 aspace, cycle, dwidth;
1268 struct vme_bus_error *vme_err = NULL;
1269 struct vme_bridge *tsi148_bridge;
1270 void __iomem *addr = image->kern_base + offset;
1271 unsigned int done = 0;
1272 unsigned int count32;
1273
1274 tsi148_bridge = image->parent;
1275
1276 spin_lock(&image->lock);
1277
1278 /* The following code handles VME address alignment. We cannot use
1279 * memcpy_xxx here because it may cut data transfers into 8-bit
1280 * cycles when D16 or D32 cycles are required on the VME bus.
1281 * On the other hand, the bridge itself assures that the maximum data
1282 * cycle configured for the transfer is used and splits it
1283 * automatically for non-aligned addresses, so we don't want the
1284 * overhead of needlessly forcing small transfers for the entire cycle.
1285 */
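/*
 * For example, with a window address ending in 0x..2 and count = 10 the code
 * below issues one 16-bit read followed by two 32-bit reads, while a
 * transfer starting at an odd address leads with a single 8-bit read.
 */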
1286 if ((uintptr_t)addr & 0x1) {
1287 *(u8 *)buf = ioread8(addr);
1288 done += 1;
1289 if (done == count)
1290 goto out;
1291 }
1292 if ((uintptr_t)(addr + done) & 0x2) {
1293 if ((count - done) < 2) {
1294 *(u8 *)(buf + done) = ioread8(addr + done);
1295 done += 1;
1296 goto out;
1297 } else {
1298 *(u16 *)(buf + done) = ioread16(addr + done);
1299 done += 2;
1300 }
1301 }
1302
1303 count32 = (count - done) & ~0x3;
1304 while (done < count32) {
1305 *(u32 *)(buf + done) = ioread32(addr + done);
1306 done += 4;
1307 }
1308
1309 if ((count - done) & 0x2) {
1310 *(u16 *)(buf + done) = ioread16(addr + done);
1311 done += 2;
1312 }
1313 if ((count - done) & 0x1) {
1314 *(u8 *)(buf + done) = ioread8(addr + done);
1315 done += 1;
1316 }
1317
1318 out:
1319 retval = count;
1320
1321 if (!err_chk)
1322 goto skip_chk;
1323
1324 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1325 &dwidth);
1326
1327 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1328 count);
1329 if (vme_err != NULL) {
1330 dev_err(image->parent->parent, "First VME read error detected "
1331 "an at address 0x%llx\n", vme_err->address);
1332 retval = vme_err->address - (vme_base + offset);
1333 /* Clear down saved errors in this address range */
1334 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1335 count);
1336 }
1337
1338 skip_chk:
1339 spin_unlock(&image->lock);
1340
1341 return retval;
1342 }
1343
1344
1345 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1346 size_t count, loff_t offset)
1347 {
1348 int retval = 0, enabled;
1349 unsigned long long vme_base, size;
1350 u32 aspace, cycle, dwidth;
1351 void __iomem *addr = image->kern_base + offset;
1352 unsigned int done = 0;
1353 unsigned int count32;
1354
1355 struct vme_bus_error *vme_err = NULL;
1356 struct vme_bridge *tsi148_bridge;
1357 struct tsi148_driver *bridge;
1358
1359 tsi148_bridge = image->parent;
1360
1361 bridge = tsi148_bridge->driver_priv;
1362
1363 spin_lock(&image->lock);
1364
1365 /* We use the same strategy here as in the master_read function above
1366 * to ensure the correct data cycle widths are used on the VME bus.
1367 */
1368 if ((uintptr_t)addr & 0x1) {
1369 iowrite8(*(u8 *)buf, addr);
1370 done += 1;
1371 if (done == count)
1372 goto out;
1373 }
1374 if ((uintptr_t)(addr + done) & 0x2) {
1375 if ((count - done) < 2) {
1376 iowrite8(*(u8 *)(buf + done), addr + done);
1377 done += 1;
1378 goto out;
1379 } else {
1380 iowrite16(*(u16 *)(buf + done), addr + done);
1381 done += 2;
1382 }
1383 }
1384
1385 count32 = (count - done) & ~0x3;
1386 while (done < count32) {
1387 iowrite32(*(u32 *)(buf + done), addr + done);
1388 done += 4;
1389 }
1390
1391 if ((count - done) & 0x2) {
1392 iowrite16(*(u16 *)(buf + done), addr + done);
1393 done += 2;
1394 }
1395 if ((count - done) & 0x1) {
1396 iowrite8(*(u8 *)(buf + done), addr + done);
1397 done += 1;
1398 }
1399
1400 out:
1401 retval = count;
1402
1403 /*
1404 * Writes are posted. We need to do a read on the VME bus to flush out
1405 * all of the writes before we check for errors, and we can't guarantee
1406 * that reading back the data we have just written is safe. It is
1407 * believed that there is no read/write re-ordering, so we can read any
1408 * location in VME space; let's read the Device ID from the tsi148's
1409 * own registers as mapped into CR/CSR space.
1410 *
1411 * We check for saved errors in the written address range/space.
1412 */
1413
1414 if (!err_chk)
1415 goto skip_chk;
1416
1417 /*
1418 * Get window info first, to maximise the time that the buffers may
1419 * flush on their own.
1420 */
1421 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1422 &dwidth);
1423
1424 ioread16(bridge->flush_image->kern_base + 0x7F000);
1425
1426 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1427 count);
1428 if (vme_err != NULL) {
1429 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1430 " an at address 0x%llx\n", vme_err->address);
1431 retval = vme_err->address - (vme_base + offset);
1432 /* Clear down saved errors in this address range */
1433 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1434 count);
1435 }
1436
1437 skip_chk:
1438 spin_unlock(&image->lock);
1439
1440 return retval;
1441 }
1442
1443 /*
1444 * Perform an RMW cycle on the VME bus.
1445 *
1446 * Requires a previously configured master window, returns final value.
1447 */
1448 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1449 unsigned int mask, unsigned int compare, unsigned int swap,
1450 loff_t offset)
1451 {
1452 unsigned long long pci_addr;
1453 unsigned int pci_addr_high, pci_addr_low;
1454 u32 tmp, result;
1455 int i;
1456 struct tsi148_driver *bridge;
1457
1458 bridge = image->parent->driver_priv;
1459
1460 /* Find the PCI address that maps to the desired VME address */
1461 i = image->number;
1462
1463 /* Locking as we can only do one of these at a time */
1464 mutex_lock(&bridge->vme_rmw);
1465
1466 /* Lock image */
1467 spin_lock(&image->lock);
1468
1469 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1470 TSI148_LCSR_OFFSET_OTSAU);
1471 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1472 TSI148_LCSR_OFFSET_OTSAL);
1473
1474 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1475 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1476
1477 /* Configure registers */
1478 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1479 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1480 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1481 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1482 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1483
1484 /* Enable RMW */
1485 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1486 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1487 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1488
1489 /* Kick process off with a read to the required address. */
1490 result = ioread32be(image->kern_base + offset);
1491
1492 /* Disable RMW */
1493 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1494 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1495 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1496
1497 spin_unlock(&image->lock);
1498
1499 mutex_unlock(&bridge->vme_rmw);
1500
1501 return result;
1502 }
1503
1504 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1505 u32 aspace, u32 cycle, u32 dwidth)
1506 {
1507 u32 val;
1508
1509 val = be32_to_cpu(*attr);
1510
1511 /* Setup 2eSST speeds */
1512 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1513 case VME_2eSST160:
1514 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1515 break;
1516 case VME_2eSST267:
1517 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1518 break;
1519 case VME_2eSST320:
1520 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1521 break;
1522 }
1523
1524 /* Setup cycle types */
1525 if (cycle & VME_SCT)
1526 val |= TSI148_LCSR_DSAT_TM_SCT;
1527
1528 if (cycle & VME_BLT)
1529 val |= TSI148_LCSR_DSAT_TM_BLT;
1530
1531 if (cycle & VME_MBLT)
1532 val |= TSI148_LCSR_DSAT_TM_MBLT;
1533
1534 if (cycle & VME_2eVME)
1535 val |= TSI148_LCSR_DSAT_TM_2eVME;
1536
1537 if (cycle & VME_2eSST)
1538 val |= TSI148_LCSR_DSAT_TM_2eSST;
1539
1540 if (cycle & VME_2eSSTB) {
1541 dev_err(dev, "Currently not setting Broadcast Select "
1542 "Registers\n");
1543 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1544 }
1545
1546 /* Setup data width */
1547 switch (dwidth) {
1548 case VME_D16:
1549 val |= TSI148_LCSR_DSAT_DBW_16;
1550 break;
1551 case VME_D32:
1552 val |= TSI148_LCSR_DSAT_DBW_32;
1553 break;
1554 default:
1555 dev_err(dev, "Invalid data width\n");
1556 return -EINVAL;
1557 }
1558
1559 /* Setup address space */
1560 switch (aspace) {
1561 case VME_A16:
1562 val |= TSI148_LCSR_DSAT_AMODE_A16;
1563 break;
1564 case VME_A24:
1565 val |= TSI148_LCSR_DSAT_AMODE_A24;
1566 break;
1567 case VME_A32:
1568 val |= TSI148_LCSR_DSAT_AMODE_A32;
1569 break;
1570 case VME_A64:
1571 val |= TSI148_LCSR_DSAT_AMODE_A64;
1572 break;
1573 case VME_CRCSR:
1574 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1575 break;
1576 case VME_USER1:
1577 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1578 break;
1579 case VME_USER2:
1580 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1581 break;
1582 case VME_USER3:
1583 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1584 break;
1585 case VME_USER4:
1586 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1587 break;
1588 default:
1589 dev_err(dev, "Invalid address space\n");
1590 return -EINVAL;
1591 break;
1592 }
1593
1594 if (cycle & VME_SUPER)
1595 val |= TSI148_LCSR_DSAT_SUP;
1596 if (cycle & VME_PROG)
1597 val |= TSI148_LCSR_DSAT_PGM;
1598
1599 *attr = cpu_to_be32(val);
1600
1601 return 0;
1602 }
1603
1604 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1605 u32 aspace, u32 cycle, u32 dwidth)
1606 {
1607 u32 val;
1608
1609 val = be32_to_cpu(*attr);
1610
1611 /* Setup 2eSST speeds */
1612 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1613 case VME_2eSST160:
1614 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1615 break;
1616 case VME_2eSST267:
1617 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1618 break;
1619 case VME_2eSST320:
1620 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1621 break;
1622 }
1623
1624 /* Setup cycle types */
1625 if (cycle & VME_SCT)
1626 val |= TSI148_LCSR_DDAT_TM_SCT;
1627
1628 if (cycle & VME_BLT)
1629 val |= TSI148_LCSR_DDAT_TM_BLT;
1630
1631 if (cycle & VME_MBLT)
1632 val |= TSI148_LCSR_DDAT_TM_MBLT;
1633
1634 if (cycle & VME_2eVME)
1635 val |= TSI148_LCSR_DDAT_TM_2eVME;
1636
1637 if (cycle & VME_2eSST)
1638 val |= TSI148_LCSR_DDAT_TM_2eSST;
1639
1640 if (cycle & VME_2eSSTB) {
1641 dev_err(dev, "Currently not setting Broadcast Select "
1642 "Registers\n");
1643 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1644 }
1645
1646 /* Setup data width */
1647 switch (dwidth) {
1648 case VME_D16:
1649 val |= TSI148_LCSR_DDAT_DBW_16;
1650 break;
1651 case VME_D32:
1652 val |= TSI148_LCSR_DDAT_DBW_32;
1653 break;
1654 default:
1655 dev_err(dev, "Invalid data width\n");
1656 return -EINVAL;
1657 }
1658
1659 /* Setup address space */
1660 switch (aspace) {
1661 case VME_A16:
1662 val |= TSI148_LCSR_DDAT_AMODE_A16;
1663 break;
1664 case VME_A24:
1665 val |= TSI148_LCSR_DDAT_AMODE_A24;
1666 break;
1667 case VME_A32:
1668 val |= TSI148_LCSR_DDAT_AMODE_A32;
1669 break;
1670 case VME_A64:
1671 val |= TSI148_LCSR_DDAT_AMODE_A64;
1672 break;
1673 case VME_CRCSR:
1674 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1675 break;
1676 case VME_USER1:
1677 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1678 break;
1679 case VME_USER2:
1680 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1681 break;
1682 case VME_USER3:
1683 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1684 break;
1685 case VME_USER4:
1686 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1687 break;
1688 default:
1689 dev_err(dev, "Invalid address space\n");
1690 return -EINVAL;
1691 break;
1692 }
1693
1694 if (cycle & VME_SUPER)
1695 val |= TSI148_LCSR_DDAT_SUP;
1696 if (cycle & VME_PROG)
1697 val |= TSI148_LCSR_DDAT_PGM;
1698
1699 *attr = cpu_to_be32(val);
1700
1701 return 0;
1702 }
1703
1704 /*
1705 * Add a link list descriptor to the list
1706 *
1707 * Note: DMA engine expects the DMA descriptor to be big endian.
1708 */
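/*
 * The descriptor is handed to the DMA engine by bus address and read by the
 * hardware directly, which is why every field below is stored with
 * cpu_to_be32() rather than in host byte order.
 */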
1709 static int tsi148_dma_list_add(struct vme_dma_list *list,
1710 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1711 {
1712 struct tsi148_dma_entry *entry, *prev;
1713 u32 address_high, address_low, val;
1714 struct vme_dma_pattern *pattern_attr;
1715 struct vme_dma_pci *pci_attr;
1716 struct vme_dma_vme *vme_attr;
1717 int retval = 0;
1718 struct vme_bridge *tsi148_bridge;
1719
1720 tsi148_bridge = list->parent->parent;
1721
1722 /* Descriptor must be aligned on 64-bit boundaries */
1723 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1724 if (entry == NULL) {
1725 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1726 "dma resource structure\n");
1727 retval = -ENOMEM;
1728 goto err_mem;
1729 }
1730
1731 /* Test descriptor alignment */
1732 if ((unsigned long)&entry->descriptor & 0x7) {
1733 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1734 "byte boundary as required: %p\n",
1735 &entry->descriptor);
1736 retval = -EINVAL;
1737 goto err_align;
1738 }
1739
1740 /* Given we are going to fill out the structure, we probably don't
1741 * need to zero it, but better safe than sorry for now.
1742 */
1743 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1744
1745 /* Fill out source part */
1746 switch (src->type) {
1747 case VME_DMA_PATTERN:
1748 pattern_attr = src->private;
1749
1750 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1751
1752 val = TSI148_LCSR_DSAT_TYP_PAT;
1753
1754 /* Default behaviour is 32 bit pattern */
1755 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1756 val |= TSI148_LCSR_DSAT_PSZ;
1757
1758 /* It seems that the default behaviour is to increment */
1759 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1760 val |= TSI148_LCSR_DSAT_NIN;
1761 entry->descriptor.dsat = cpu_to_be32(val);
1762 break;
1763 case VME_DMA_PCI:
1764 pci_attr = src->private;
1765
1766 reg_split((unsigned long long)pci_attr->address, &address_high,
1767 &address_low);
1768 entry->descriptor.dsau = cpu_to_be32(address_high);
1769 entry->descriptor.dsal = cpu_to_be32(address_low);
1770 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1771 break;
1772 case VME_DMA_VME:
1773 vme_attr = src->private;
1774
1775 reg_split((unsigned long long)vme_attr->address, &address_high,
1776 &address_low);
1777 entry->descriptor.dsau = cpu_to_be32(address_high);
1778 entry->descriptor.dsal = cpu_to_be32(address_low);
1779 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1780
1781 retval = tsi148_dma_set_vme_src_attributes(
1782 tsi148_bridge->parent, &entry->descriptor.dsat,
1783 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1784 if (retval < 0)
1785 goto err_source;
1786 break;
1787 default:
1788 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1789 retval = -EINVAL;
1790 goto err_source;
1791 break;
1792 }
1793
1794 /* Assume last link - this will be over-written by adding another */
1795 entry->descriptor.dnlau = cpu_to_be32(0);
1796 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1797
1798 /* Fill out destination part */
1799 switch (dest->type) {
1800 case VME_DMA_PCI:
1801 pci_attr = dest->private;
1802
1803 reg_split((unsigned long long)pci_attr->address, &address_high,
1804 &address_low);
1805 entry->descriptor.ddau = cpu_to_be32(address_high);
1806 entry->descriptor.ddal = cpu_to_be32(address_low);
1807 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1808 break;
1809 case VME_DMA_VME:
1810 vme_attr = dest->private;
1811
1812 reg_split((unsigned long long)vme_attr->address, &address_high,
1813 &address_low);
1814 entry->descriptor.ddau = cpu_to_be32(address_high);
1815 entry->descriptor.ddal = cpu_to_be32(address_low);
1816 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1817
1818 retval = tsi148_dma_set_vme_dest_attributes(
1819 tsi148_bridge->parent, &entry->descriptor.ddat,
1820 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1821 if (retval < 0)
1822 goto err_dest;
1823 break;
1824 default:
1825 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1826 retval = -EINVAL;
1827 goto err_dest;
1828 break;
1829 }
1830
1831 /* Fill out count */
1832 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1833
1834 /* Add to list */
1835 list_add_tail(&entry->list, &list->entries);
1836
1837 /* Fill out the previous descriptor's "Next Address" */
1838 if (entry->list.prev != &list->entries) {
1839 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1840 list);
1841 /* We need the bus address for the pointer */
1842 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1843 &entry->descriptor,
1844 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1845
1846 reg_split((unsigned long long)entry->dma_handle, &address_high,
1847 &address_low);
1848 entry->descriptor.dnlau = cpu_to_be32(address_high);
1849 entry->descriptor.dnlal = cpu_to_be32(address_low);
1850
1851 }
1852
1853 return 0;
1854
1855 err_dest:
1856 err_source:
1857 err_align:
1858 kfree(entry);
1859 err_mem:
1860 return retval;
1861 }
1862
1863 /*
1864 * Check to see if the provided DMA channel is busy (returns zero while busy, non-zero once idle).
1865 */
1866 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1867 {
1868 u32 tmp;
1869 struct tsi148_driver *bridge;
1870
1871 bridge = tsi148_bridge->driver_priv;
1872
1873 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1874 TSI148_LCSR_OFFSET_DSTA);
1875
1876 if (tmp & TSI148_LCSR_DSTA_BSY)
1877 return 0;
1878 else
1879 return 1;
1880
1881 }
1882
1883 /*
1884 * Execute a previously generated link list
1885 *
1886 * XXX Need to provide control register configuration.
1887 */
1888 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1889 {
1890 struct vme_dma_resource *ctrlr;
1891 int channel, retval = 0;
1892 struct tsi148_dma_entry *entry;
1893 u32 bus_addr_high, bus_addr_low;
1894 u32 val, dctlreg = 0;
1895 struct vme_bridge *tsi148_bridge;
1896 struct tsi148_driver *bridge;
1897
1898 ctrlr = list->parent;
1899
1900 tsi148_bridge = ctrlr->parent;
1901
1902 bridge = tsi148_bridge->driver_priv;
1903
1904 mutex_lock(&ctrlr->mtx);
1905
1906 channel = ctrlr->number;
1907
1908 if (!list_empty(&ctrlr->running)) {
1909 /*
1910 * XXX We have an active DMA transfer and currently haven't
1911 * sorted out the mechanism for "pending" DMA transfers.
1912 * Return busy.
1913 */
1914 /* Need to add to pending here */
1915 mutex_unlock(&ctrlr->mtx);
1916 return -EBUSY;
1917 } else {
1918 list_add(&list->list, &ctrlr->running);
1919 }
1920
1921 /* Get first bus address and write into registers */
1922 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1923 list);
1924
1925 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1926 &entry->descriptor,
1927 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1928
1929 mutex_unlock(&ctrlr->mtx);
1930
1931 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1932
1933 iowrite32be(bus_addr_high, bridge->base +
1934 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1935 iowrite32be(bus_addr_low, bridge->base +
1936 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1937
1938 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1939 TSI148_LCSR_OFFSET_DCTL);
1940
1941 /* Start the operation */
1942 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1943 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1944
1945 wait_event_interruptible(bridge->dma_queue[channel],
1946 tsi148_dma_busy(ctrlr->parent, channel));
1947
1948 /*
1949 * Read status register, this register is valid until we kick off a
1950 * new transfer.
1951 */
1952 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1953 TSI148_LCSR_OFFSET_DSTA);
1954
1955 if (val & TSI148_LCSR_DSTA_VBE) {
1956 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1957 retval = -EIO;
1958 }
1959
1960 /* Remove list from running list */
1961 mutex_lock(&ctrlr->mtx);
1962 list_del(&list->list);
1963 mutex_unlock(&ctrlr->mtx);
1964
1965 return retval;
1966 }
1967
1968 /*
1969 * Clean up a previously generated linked list
1970 *
1971 * This is a separate function; do not assume that the chain cannot be reused.
1972 */
1973 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1974 {
1975 struct list_head *pos, *temp;
1976 struct tsi148_dma_entry *entry;
1977
1978 struct vme_bridge *tsi148_bridge = list->parent->parent;
1979
1980 /* detach and free each entry */
1981 list_for_each_safe(pos, temp, &list->entries) {
1982 list_del(pos);
1983 entry = list_entry(pos, struct tsi148_dma_entry, list);
1984
1985 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1986 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1987 kfree(entry);
1988 }
1989
1990 return 0;
1991 }
1992
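/*
* For reference, the location monitors are normally driven through the VME
* core rather than by calling the tsi148_lm_* functions below directly. A
* minimal sketch of that sequence, assuming the vme_lm_* helpers declared in
* <linux/vme.h> (signatures worth double-checking there); vdev, lm_callback
* and the base address are placeholders:
*
*	struct vme_resource *lm_res;
*
*	lm_res = vme_lm_request(vdev);
*	vme_lm_set(lm_res, 0x60000000, VME_A32, VME_USER | VME_DATA);
*	vme_lm_attach(lm_res, 0, lm_callback);
*	...
*	vme_lm_detach(lm_res, 0);
*	vme_lm_free(lm_res);
*/
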
1993 /*
1994 * All 4 location monitors reside at the same base - this is therefore a
1995 * system wide configuration.
1996 *
1997 * This does not enable the location monitor - that is done when the first
1998 * callback is attached, and it is disabled when the last callback is removed.
1999 */
2000 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
2001 u32 aspace, u32 cycle)
2002 {
2003 u32 lm_base_high, lm_base_low, lm_ctl = 0;
2004 int i;
2005 struct vme_bridge *tsi148_bridge;
2006 struct tsi148_driver *bridge;
2007
2008 tsi148_bridge = lm->parent;
2009
2010 bridge = tsi148_bridge->driver_priv;
2011
2012 mutex_lock(&lm->mtx);
2013
2014 /* If we already have a callback attached, we can't move it! */
2015 for (i = 0; i < lm->monitors; i++) {
2016 if (bridge->lm_callback[i] != NULL) {
2017 mutex_unlock(&lm->mtx);
2018 dev_err(tsi148_bridge->parent, "Location monitor "
2019 "callback attached, can't reset\n");
2020 return -EBUSY;
2021 }
2022 }
2023
2024 switch (aspace) {
2025 case VME_A16:
2026 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2027 break;
2028 case VME_A24:
2029 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2030 break;
2031 case VME_A32:
2032 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2033 break;
2034 case VME_A64:
2035 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2036 break;
2037 default:
2038 mutex_unlock(&lm->mtx);
2039 dev_err(tsi148_bridge->parent, "Invalid address space\n");
2040 return -EINVAL;
2041 break;
2042 }
2043
2044 if (cycle & VME_SUPER)
2045 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
2046 if (cycle & VME_USER)
2047 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2048 if (cycle & VME_PROG)
2049 lm_ctl |= TSI148_LCSR_LMAT_PGM;
2050 if (cycle & VME_DATA)
2051 lm_ctl |= TSI148_LCSR_LMAT_DATA;
2052
2053 reg_split(lm_base, &lm_base_high, &lm_base_low);
2054
2055 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
2056 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
2057 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2058
2059 mutex_unlock(&lm->mtx);
2060
2061 return 0;
2062 }
2063
2064 /* Get the configuration of the location monitor and return whether it is enabled
2065 * or disabled.
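*
* Note: *aspace and *cycle are OR-accumulated rather than assigned, so the
* caller is expected to zero them before calling this function.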
2066 */
2067 static int tsi148_lm_get(struct vme_lm_resource *lm,
2068 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2069 {
2070 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2071 struct tsi148_driver *bridge;
2072
2073 bridge = lm->parent->driver_priv;
2074
2075 mutex_lock(&lm->mtx);
2076
2077 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2078 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2079 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2080
2081 reg_join(lm_base_high, lm_base_low, lm_base);
2082
2083 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2084 enabled = 1;
2085
2086 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2087 *aspace |= VME_A16;
2088
2089 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2090 *aspace |= VME_A24;
2091
2092 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2093 *aspace |= VME_A32;
2094
2095 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2096 *aspace |= VME_A64;
2097
2098
2099 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2100 *cycle |= VME_SUPER;
2101 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2102 *cycle |= VME_USER;
2103 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2104 *cycle |= VME_PROG;
2105 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2106 *cycle |= VME_DATA;
2107
2108 mutex_unlock(&lm->mtx);
2109
2110 return enabled;
2111 }
2112
2113 /*
2114 * Attach a callback to a specific location monitor.
2115 *
2116 * The callback will be passed the number of the monitor that triggered.
2117 */
2118 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2119 void (*callback)(int))
2120 {
2121 u32 lm_ctl, tmp;
2122 struct vme_bridge *tsi148_bridge;
2123 struct tsi148_driver *bridge;
2124
2125 tsi148_bridge = lm->parent;
2126
2127 bridge = tsi148_bridge->driver_priv;
2128
2129 mutex_lock(&lm->mtx);
2130
2131 /* Ensure that the location monitor is configured - need PGM or DATA */
2132 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2133 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2134 mutex_unlock(&lm->mtx);
2135 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2136 "configured\n");
2137 return -EINVAL;
2138 }
2139
2140 /* Check that a callback isn't already attached */
2141 if (bridge->lm_callback[monitor] != NULL) {
2142 mutex_unlock(&lm->mtx);
2143 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2144 return -EBUSY;
2145 }
2146
2147 /* Attach callback */
2148 bridge->lm_callback[monitor] = callback;
2149
2150 /* Enable Location Monitor interrupt */
2151 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2152 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2153 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2154
2155 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2156 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2157 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2158
2159 /* Ensure that the global Location Monitor Enable bit is set */
2160 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2161 lm_ctl |= TSI148_LCSR_LMAT_EN;
2162 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2163 }
2164
2165 mutex_unlock(&lm->mtx);
2166
2167 return 0;
2168 }
2169
2170 /*
2171 * Detach a callback function from a specific location monitor.
2172 */
2173 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2174 {
2175 u32 lm_en, tmp;
2176 struct tsi148_driver *bridge;
2177
2178 bridge = lm->parent->driver_priv;
2179
2180 mutex_lock(&lm->mtx);
2181
2182 /* Disable Location Monitor and ensure previous interrupts are clear */
2183 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2184 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2185 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2186
2187 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2188 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2189 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2190
2191 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2192 bridge->base + TSI148_LCSR_INTC);
2193
2194 /* Detach callback */
2195 bridge->lm_callback[monitor] = NULL;
2196
2197 /* If all location monitors disabled, disable global Location Monitor */
2198 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2199 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2200 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2201 tmp &= ~TSI148_LCSR_LMAT_EN;
2202 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2203 }
2204
2205 mutex_unlock(&lm->mtx);
2206
2207 return 0;
2208 }
2209
2210 /*
2211 * Determine Geographical Addressing
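*
* The geoid module parameter, when non-zero, overrides the geographical
* address read from the VSTAT register.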
2212 */
2213 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2214 {
2215 u32 slot = 0;
2216 struct tsi148_driver *bridge;
2217
2218 bridge = tsi148_bridge->driver_priv;
2219
2220 if (!geoid) {
2221 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2222 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2223 } else
2224 slot = geoid;
2225
2226 return (int)slot;
2227 }
2228
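/*
* Consistent (coherent) buffer helpers. These translate the generic struct
* device handed down by the VME core into the owning pci_dev; at the time of
* writing pci_alloc_consistent()/pci_free_consistent() are thin wrappers
* around dma_alloc_coherent()/dma_free_coherent().
*/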
2229 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2230 dma_addr_t *dma)
2231 {
2232 struct pci_dev *pdev;
2233
2234 /* Find pci_dev container of dev */
2235 pdev = container_of(parent, struct pci_dev, dev);
2236
2237 return pci_alloc_consistent(pdev, size, dma);
2238 }
2239
2240 static void tsi148_free_consistent(struct device *parent, size_t size,
2241 void *vaddr, dma_addr_t dma)
2242 {
2243 struct pci_dev *pdev;
2244
2245 /* Find pci_dev container of dev */
2246 pdev = container_of(parent, struct pci_dev, dev);
2247
2248 pci_free_consistent(pdev, size, vaddr, dma);
2249 }
2250
2251 /*
2252 * Configure CR/CSR space
2253 *
2254 * Access to the CR/CSR can be configured at power-up. The location of the
2255 * CR/CSR registers in the CR/CSR address space is determined by the board's
2256 * Auto-ID or Geographic address. This function ensures that the window is
2257 * enabled at an offset consistent with the board's geographic address.
2258 *
2259 * Each board has a 512kB window, with the highest 4kB being used for the
2260 * board's registers; this means there is a fixed-length 508kB window which must
2261 * be mapped onto PCI memory.
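*
* For example, a board at geographical address 3 responds at CR/CSR
* addresses 0x180000 - 0x1FFFFF (3 * 512kB), with its register set in the
* highest 4kB of that window.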
2262 */
2263 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2264 struct pci_dev *pdev)
2265 {
2266 u32 cbar, crat, vstat;
2267 u32 crcsr_bus_high, crcsr_bus_low;
2268 int retval;
2269 struct tsi148_driver *bridge;
2270
2271 bridge = tsi148_bridge->driver_priv;
2272
2273 /* Allocate mem for CR/CSR image */
2274 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2275 &bridge->crcsr_bus);
2276 if (bridge->crcsr_kernel == NULL) {
2277 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2278 "CR/CSR image\n");
2279 return -ENOMEM;
2280 }
2281
2282 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2283
2284 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2285
2286 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2287 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2288
2289 /* Ensure that the CR/CSR is configured at the correct offset */
2290 cbar = ioread32be(bridge->base + TSI148_CBAR);
2291 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2292
2293 vstat = tsi148_slot_get(tsi148_bridge);
2294
2295 if (cbar != vstat) {
2296 cbar = vstat;
2297 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2298 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2299 }
2300 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2301
2302 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2303 if (crat & TSI148_LCSR_CRAT_EN)
2304 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2305 else {
2306 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2307 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2308 bridge->base + TSI148_LCSR_CRAT);
2309 }
2310
2311 /* If we want flushed, error-checked writes, set up a window
2312 * over the CR/CSR registers. We read from here to safely flush
2313 * through VME writes.
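* The window covers this board's own 512kB CR/CSR slot, which is why the
* base passed to tsi148_master_set() below is vstat * 0x80000.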
2314 */
2315 if (err_chk) {
2316 retval = tsi148_master_set(bridge->flush_image, 1,
2317 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2318 VME_D16);
2319 if (retval)
2320 dev_err(tsi148_bridge->parent, "Configuring flush image"
2321 " failed\n");
2322 }
2323
2324 return 0;
2325
2326 }
2327
2328 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2329 struct pci_dev *pdev)
2330 {
2331 u32 crat;
2332 struct tsi148_driver *bridge;
2333
2334 bridge = tsi148_bridge->driver_priv;
2335
2336 /* Turn off CR/CSR space */
2337 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2338 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2339 bridge->base + TSI148_LCSR_CRAT);
2340
2341 /* Free image */
2342 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2343 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2344
2345 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2346 bridge->crcsr_bus);
2347 }
2348
2349 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2350 {
2351 int retval, i, master_num;
2352 u32 data;
2353 struct list_head *pos = NULL, *n;
2354 struct vme_bridge *tsi148_bridge;
2355 struct tsi148_driver *tsi148_device;
2356 struct vme_master_resource *master_image;
2357 struct vme_slave_resource *slave_image;
2358 struct vme_dma_resource *dma_ctrlr;
2359 struct vme_lm_resource *lm;
2360
2361 /* If we want to support more than one of each bridge, we need to
2362 * dynamically generate this so we get one per device
2363 */
2364 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2365 if (tsi148_bridge == NULL) {
2366 dev_err(&pdev->dev, "Failed to allocate memory for device "
2367 "structure\n");
2368 retval = -ENOMEM;
2369 goto err_struct;
2370 }
2371
2372 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2373 if (tsi148_device == NULL) {
2374 dev_err(&pdev->dev, "Failed to allocate memory for device "
2375 "structure\n");
2376 retval = -ENOMEM;
2377 goto err_driver;
2378 }
2379
2380 tsi148_bridge->driver_priv = tsi148_device;
2381
2382 /* Enable the device */
2383 retval = pci_enable_device(pdev);
2384 if (retval) {
2385 dev_err(&pdev->dev, "Unable to enable device\n");
2386 goto err_enable;
2387 }
2388
2389 /* Map Registers */
2390 retval = pci_request_regions(pdev, driver_name);
2391 if (retval) {
2392 dev_err(&pdev->dev, "Unable to reserve resources\n");
2393 goto err_resource;
2394 }
2395
2396 /* map registers in BAR 0 */
2397 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2398 4096);
2399 if (!tsi148_device->base) {
2400 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2401 retval = -EIO;
2402 goto err_remap;
2403 }
2404
2405 /* Check to see if the mapping worked out */
2406 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2407 if (data != PCI_VENDOR_ID_TUNDRA) {
2408 dev_err(&pdev->dev, "CRG region check failed\n");
2409 retval = -EIO;
2410 goto err_test;
2411 }
2412
2413 /* Initialize wait queues & mutual exclusion flags */
2414 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2415 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2416 init_waitqueue_head(&tsi148_device->iack_queue);
2417 mutex_init(&tsi148_device->vme_int);
2418 mutex_init(&tsi148_device->vme_rmw);
2419
2420 tsi148_bridge->parent = &pdev->dev;
2421 strcpy(tsi148_bridge->name, driver_name);
2422
2423 /* Setup IRQ */
2424 retval = tsi148_irq_init(tsi148_bridge);
2425 if (retval != 0) {
2426 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2427 goto err_irq;
2428 }
2429
2430 /* If we are going to flush writes, we need to read from the VME bus.
2431 * We need to do this safely, thus we read the device's own CR/CSR
2432 * register. To do this we must set up a window in CR/CSR space and
2433 * hence have one less master window resource available.
2434 */
2435 master_num = TSI148_MAX_MASTER;
2436 if (err_chk) {
2437 master_num--;
2438
2439 tsi148_device->flush_image =
2440 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2441 if (tsi148_device->flush_image == NULL) {
2442 dev_err(&pdev->dev, "Failed to allocate memory for "
2443 "flush resource structure\n");
2444 retval = -ENOMEM;
2445 goto err_master;
2446 }
2447 tsi148_device->flush_image->parent = tsi148_bridge;
2448 spin_lock_init(&tsi148_device->flush_image->lock);
2449 tsi148_device->flush_image->locked = 1;
2450 tsi148_device->flush_image->number = master_num;
2451 memset(&tsi148_device->flush_image->bus_resource, 0,
2452 sizeof(struct resource));
2453 tsi148_device->flush_image->kern_base = NULL;
2454 }
2455
2456 /* Add master windows to list */
2457 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2458 for (i = 0; i < master_num; i++) {
2459 master_image = kmalloc(sizeof(struct vme_master_resource),
2460 GFP_KERNEL);
2461 if (master_image == NULL) {
2462 dev_err(&pdev->dev, "Failed to allocate memory for "
2463 "master resource structure\n");
2464 retval = -ENOMEM;
2465 goto err_master;
2466 }
2467 master_image->parent = tsi148_bridge;
2468 spin_lock_init(&master_image->lock);
2469 master_image->locked = 0;
2470 master_image->number = i;
2471 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2472 VME_A64;
2473 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2474 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2475 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2476 VME_PROG | VME_DATA;
2477 master_image->width_attr = VME_D16 | VME_D32;
2478 memset(&master_image->bus_resource, 0,
2479 sizeof(struct resource));
2480 master_image->kern_base = NULL;
2481 list_add_tail(&master_image->list,
2482 &tsi148_bridge->master_resources);
2483 }
2484
2485 /* Add slave windows to list */
2486 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2487 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2488 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2489 GFP_KERNEL);
2490 if (slave_image == NULL) {
2491 dev_err(&pdev->dev, "Failed to allocate memory for "
2492 "slave resource structure\n");
2493 retval = -ENOMEM;
2494 goto err_slave;
2495 }
2496 slave_image->parent = tsi148_bridge;
2497 mutex_init(&slave_image->mtx);
2498 slave_image->locked = 0;
2499 slave_image->number = i;
2500 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2501 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2502 VME_USER3 | VME_USER4;
2503 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2504 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2505 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2506 VME_PROG | VME_DATA;
2507 list_add_tail(&slave_image->list,
2508 &tsi148_bridge->slave_resources);
2509 }
2510
2511 /* Add dma engines to list */
2512 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2513 for (i = 0; i < TSI148_MAX_DMA; i++) {
2514 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2515 GFP_KERNEL);
2516 if (dma_ctrlr == NULL) {
2517 dev_err(&pdev->dev, "Failed to allocate memory for "
2518 "dma resource structure\n");
2519 retval = -ENOMEM;
2520 goto err_dma;
2521 }
2522 dma_ctrlr->parent = tsi148_bridge;
2523 mutex_init(&dma_ctrlr->mtx);
2524 dma_ctrlr->locked = 0;
2525 dma_ctrlr->number = i;
2526 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2527 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2528 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2529 VME_DMA_PATTERN_TO_MEM;
2530 INIT_LIST_HEAD(&dma_ctrlr->pending);
2531 INIT_LIST_HEAD(&dma_ctrlr->running);
2532 list_add_tail(&dma_ctrlr->list,
2533 &tsi148_bridge->dma_resources);
2534 }
2535
2536 /* Add location monitor to list */
2537 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2538 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2539 if (lm == NULL) {
2540 dev_err(&pdev->dev, "Failed to allocate memory for "
2541 "location monitor resource structure\n");
2542 retval = -ENOMEM;
2543 goto err_lm;
2544 }
2545 lm->parent = tsi148_bridge;
2546 mutex_init(&lm->mtx);
2547 lm->locked = 0;
2548 lm->number = 1;
2549 lm->monitors = 4;
2550 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2551
2552 tsi148_bridge->slave_get = tsi148_slave_get;
2553 tsi148_bridge->slave_set = tsi148_slave_set;
2554 tsi148_bridge->master_get = tsi148_master_get;
2555 tsi148_bridge->master_set = tsi148_master_set;
2556 tsi148_bridge->master_read = tsi148_master_read;
2557 tsi148_bridge->master_write = tsi148_master_write;
2558 tsi148_bridge->master_rmw = tsi148_master_rmw;
2559 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2560 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2561 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2562 tsi148_bridge->irq_set = tsi148_irq_set;
2563 tsi148_bridge->irq_generate = tsi148_irq_generate;
2564 tsi148_bridge->lm_set = tsi148_lm_set;
2565 tsi148_bridge->lm_get = tsi148_lm_get;
2566 tsi148_bridge->lm_attach = tsi148_lm_attach;
2567 tsi148_bridge->lm_detach = tsi148_lm_detach;
2568 tsi148_bridge->slot_get = tsi148_slot_get;
2569 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2570 tsi148_bridge->free_consistent = tsi148_free_consistent;
2571
2572 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2573 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2574 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2575 if (!geoid)
2576 dev_info(&pdev->dev, "VME geographical address is %d\n",
2577 data & TSI148_LCSR_VSTAT_GA_M);
2578 else
2579 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2580 geoid);
2581
2582 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2583 err_chk ? "enabled" : "disabled");
2584
2585 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2586 if (retval) {
2587 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2588 goto err_crcsr;
2589 }
2590
2591 retval = vme_register_bridge(tsi148_bridge);
2592 if (retval != 0) {
2593 dev_err(&pdev->dev, "Chip Registration failed.\n");
2594 goto err_reg;
2595 }
2596
2597 pci_set_drvdata(pdev, tsi148_bridge);
2598
2599 /* Clear VME bus "board fail" and "power-up reset" lines */
2600 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2601 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2602 data |= TSI148_LCSR_VSTAT_CPURST;
2603 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2604
2605 return 0;
2606
2607 err_reg:
2608 tsi148_crcsr_exit(tsi148_bridge, pdev);
2609 err_crcsr:
2610 err_lm:
2611 /* resources are stored in a linked list */
2612 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2613 lm = list_entry(pos, struct vme_lm_resource, list);
2614 list_del(pos);
2615 kfree(lm);
2616 }
2617 err_dma:
2618 /* resources are stored in a linked list */
2619 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2620 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2621 list_del(pos);
2622 kfree(dma_ctrlr);
2623 }
2624 err_slave:
2625 /* resources are stored in a linked list */
2626 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2627 slave_image = list_entry(pos, struct vme_slave_resource, list);
2628 list_del(pos);
2629 kfree(slave_image);
2630 }
2631 err_master:
2632 /* resources are stored in a linked list */
2633 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2634 master_image = list_entry(pos, struct vme_master_resource,
2635 list);
2636 list_del(pos);
2637 kfree(master_image);
2638 }
2639
2640 tsi148_irq_exit(tsi148_bridge, pdev);
2641 err_irq:
2642 err_test:
2643 iounmap(tsi148_device->base);
2644 err_remap:
2645 pci_release_regions(pdev);
2646 err_resource:
2647 pci_disable_device(pdev);
2648 err_enable:
2649 kfree(tsi148_device);
2650 err_driver:
2651 kfree(tsi148_bridge);
2652 err_struct:
2653 return retval;
2654
2655 }
2656
2657 static void tsi148_remove(struct pci_dev *pdev)
2658 {
2659 struct list_head *pos = NULL;
2660 struct list_head *tmplist;
2661 struct vme_master_resource *master_image;
2662 struct vme_slave_resource *slave_image;
2663 struct vme_dma_resource *dma_ctrlr;
2664 int i;
2665 struct tsi148_driver *bridge;
2666 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2667
2668 bridge = tsi148_bridge->driver_priv;
2669
2670
2671 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2672
2673 /*
2674 * Shutdown all inbound and outbound windows.
2675 */
2676 for (i = 0; i < 8; i++) {
2677 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2678 TSI148_LCSR_OFFSET_ITAT);
2679 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2680 TSI148_LCSR_OFFSET_OTAT);
2681 }
2682
2683 /*
2684 * Shutdown Location monitor.
2685 */
2686 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2687
2688 /*
2689 * Shutdown CRG map.
2690 */
2691 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2692
2693 /*
2694 * Clear error status.
2695 */
2696 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2697 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2698 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2699
2700 /*
2701 * Remove VIRQ interrupt (if any)
2702 */
2703 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2704 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2705
2706 /*
2707 * Map all Interrupts to PCI INTA
2708 */
2709 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2710 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2711
2712 tsi148_irq_exit(tsi148_bridge, pdev);
2713
2714 vme_unregister_bridge(tsi148_bridge);
2715
2716 tsi148_crcsr_exit(tsi148_bridge, pdev);
2717
2718 /* resources are stored in a linked list */
2719 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2720 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2721 list_del(pos);
2722 kfree(dma_ctrlr);
2723 }
2724
2725 /* resources are stored in a linked list */
2726 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2727 slave_image = list_entry(pos, struct vme_slave_resource, list);
2728 list_del(pos);
2729 kfree(slave_image);
2730 }
2731
2732 /* resources are stored in a linked list */
2733 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2734 master_image = list_entry(pos, struct vme_master_resource,
2735 list);
2736 list_del(pos);
2737 kfree(master_image);
2738 }
2739
2740 iounmap(bridge->base);
2741
2742 pci_release_regions(pdev);
2743
2744 pci_disable_device(pdev);
2745
2746 kfree(tsi148_bridge->driver_priv);
2747
2748 kfree(tsi148_bridge);
2749 }
2750
2751 module_pci_driver(tsi148_driver);
2752
2753 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2754 module_param(err_chk, bool, 0);
2755
2756 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2757 module_param(geoid, int, 0);
2758
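/*
* Example (values are illustrative): enable write flushing/error checking
* and force the geographical address to slot 4 at load time:
*
*	modprobe vme_tsi148 err_chk=1 geoid=4
*/
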
2759 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2760 MODULE_LICENSE("GPL");