/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

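/* Descriptor ownership is LANCE-style: setting RXD_OWN hands a
 * descriptor to the chip, which clears the bit once it has DMA'd a
 * frame into the pre-loaded buffer.  The length field is primed with
 * the full buffer size (RXD_PKT_SZ) and rewritten by the chip with
 * the actual frame length, which qe_rx() later reads back.
 */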
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
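	/* Each channel owns a GLOB_MSIZE-sized slice of that memory,
	 * with the RX FIFO at the base of the slice and the TX FIFO
	 * GLOB_RSIZE bytes in; read and write pointers start out equal,
	 * i.e. both FIFOs empty.
	 */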
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling
	 * seems to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
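	/* The four channels each own one nibble of GLOB_STAT, channel 0
	 * in the lowest four bits, so walk the latched word a nibble at
	 * a time.
	 */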
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

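/* TX reclaim is lazy: TX completion interrupts stay masked
 * (CREG_TIMASK = 1) and finished descriptors are reclaimed
 * opportunistically from qe_start_xmit().  Only when the ring fills
 * does qe_start_xmit() stop the queue and unmask the TX irq, and
 * qec_interrupt() re-masks it once space opens up again.
 */
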
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
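		/* The MACE's logical address filter is a 64-bit hash:
		 * the top six bits of the little-endian CRC-32 of each
		 * multicast address select one filter bit, kept here as
		 * four 16-bit words (crc >> 4 picks the word, crc & 0xf
		 * the bit within it).
		 */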
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strlcpy(info->driver, "sunqe", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize is only used in 100baseT BigMAC configurations;
	 * program a sane default here just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
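	/* With no usable PROM burst info, fall back to every burst size
	 * below 32 (the DMA_BURSTxx constants are single-bit masks, so
	 * DMA_BURST32 - 1 sets all of the smaller-burst bits).
	 */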
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

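/* Four QE channels sit under one QEC parent.  The first channel probed
 * allocates and sets up the shared sunqec; later channels find it
 * cached in the parent's driver data.
 */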
static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

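/* Channel devices are torn down by qec_sbus_remove(); the shared QEC
 * parents live on the root_qec_dev list and are only released here,
 * when the driver itself goes away.
 */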
static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);