Commit | Line | Data |
---|---|---|
1d3bb996 | 1 | /* |
3396c782 | 2 | * drivers/net/ethernet/ibm/emac/mal.c |
1d3bb996 DG |
3 | * |
4 | * Memory Access Layer (MAL) support | |
5 | * | |
17cf803a BH |
6 | * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. |
7 | * <benh@kernel.crashing.org> | |
8 | * | |
9 | * Based on the arch/ppc version of the driver: | |
10 | * | |
1d3bb996 DG |
11 | * Copyright (c) 2004, 2005 Zultys Technologies. |
12 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
13 | * | |
14 | * Based on original work by | |
15 | * Benjamin Herrenschmidt <benh@kernel.crashing.org>, | |
16 | * David Gibson <hermes@gibson.dropbear.id.au>, | |
17 | * | |
18 | * Armin Kuster <akuster@mvista.com> | |
19 | * Copyright 2002 MontaVista Software Inc. | |
20 | * | |
21 | * This program is free software; you can redistribute it and/or modify it | |
22 | * under the terms of the GNU General Public License as published by the | |
23 | * Free Software Foundation; either version 2 of the License, or (at your | |
24 | * option) any later version. | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/delay.h> | |
5a0e3ad6 | 29 | #include <linux/slab.h> |
1d3bb996 DG |
30 | |
31 | #include "core.h" | |
fbcc4bac | 32 | #include <asm/dcr-regs.h> |
1d3bb996 DG |
33 | |
34 | static int mal_count; | |
35 | ||
1dd06ae8 | 36 | int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac) |
1d3bb996 DG |
37 | { |
38 | unsigned long flags; | |
39 | ||
40 | spin_lock_irqsave(&mal->lock, flags); | |
41 | ||
42 | MAL_DBG(mal, "reg(%08x, %08x)" NL, | |
43 | commac->tx_chan_mask, commac->rx_chan_mask); | |
44 | ||
45 | /* Don't let multiple commacs claim the same channel(s) */ | |
46 | if ((mal->tx_chan_mask & commac->tx_chan_mask) || | |
47 | (mal->rx_chan_mask & commac->rx_chan_mask)) { | |
48 | spin_unlock_irqrestore(&mal->lock, flags); | |
49 | printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", | |
50 | mal->index); | |
51 | return -EBUSY; | |
52 | } | |
53 | ||
b3e441c6 BH |
54 | if (list_empty(&mal->list)) |
55 | napi_enable(&mal->napi); | |
1d3bb996 DG |
56 | mal->tx_chan_mask |= commac->tx_chan_mask; |
57 | mal->rx_chan_mask |= commac->rx_chan_mask; | |
58 | list_add(&commac->list, &mal->list); | |
59 | ||
60 | spin_unlock_irqrestore(&mal->lock, flags); | |
61 | ||
62 | return 0; | |
63 | } | |
64 | ||
/*
 * Detach a MAL client, releasing its TX/RX channels.  When the last
 * client is removed the shared NAPI context is disabled again.
 */
void mal_unregister_commac(struct mal_instance *mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Give the channels back and drop the client from the list */
	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);

	/* NOTE(review): napi_disable() can sleep while waiting for an
	 * in-flight poll, yet it is called here under spin_lock_irqsave.
	 * Looks unsafe if a poll can still be scheduled at this point —
	 * verify against the NAPI implementation / callers before relying
	 * on this path.
	 */
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}
83 | ||
84 | int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size) | |
85 | { | |
86 | BUG_ON(channel < 0 || channel >= mal->num_rx_chans || | |
87 | size > MAL_MAX_RX_SIZE); | |
88 | ||
89 | MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size); | |
90 | ||
91 | if (size & 0xf) { | |
92 | printk(KERN_WARNING | |
93 | "mal%d: incorrect RX size %lu for the channel %d\n", | |
94 | mal->index, size, channel); | |
95 | return -EINVAL; | |
96 | } | |
97 | ||
98 | set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4); | |
99 | return 0; | |
100 | } | |
101 | ||
102 | int mal_tx_bd_offset(struct mal_instance *mal, int channel) | |
103 | { | |
104 | BUG_ON(channel < 0 || channel >= mal->num_tx_chans); | |
105 | ||
106 | return channel * NUM_TX_BUFF; | |
107 | } | |
108 | ||
109 | int mal_rx_bd_offset(struct mal_instance *mal, int channel) | |
110 | { | |
111 | BUG_ON(channel < 0 || channel >= mal->num_rx_chans); | |
112 | return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; | |
113 | } | |
114 | ||
115 | void mal_enable_tx_channel(struct mal_instance *mal, int channel) | |
116 | { | |
117 | unsigned long flags; | |
118 | ||
119 | spin_lock_irqsave(&mal->lock, flags); | |
120 | ||
121 | MAL_DBG(mal, "enable_tx(%d)" NL, channel); | |
122 | ||
123 | set_mal_dcrn(mal, MAL_TXCASR, | |
124 | get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel)); | |
125 | ||
126 | spin_unlock_irqrestore(&mal->lock, flags); | |
127 | } | |
128 | ||
/*
 * Disable a TX channel.  MAL_TXCARR is a "channel active reset"
 * register, so a plain write of the channel bit suffices — no
 * read-modify-write and therefore no lock is needed here.
 */
void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}
135 | ||
/*
 * Enable an RX channel by setting its bit in the RX Channel Active Set
 * register, under the instance lock (read-modify-write of MAL_RXCASR).
 */
void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
157 | ||
/*
 * Disable an RX channel via the "channel active reset" register
 * (MAL_RXCARR); a plain write of the bit is enough, no lock needed.
 * The same divide-by-8 quirk as mal_enable_rx_channel() applies.
 */
void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
172 | ||
173 | void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac) | |
174 | { | |
175 | unsigned long flags; | |
176 | ||
177 | spin_lock_irqsave(&mal->lock, flags); | |
178 | ||
179 | MAL_DBG(mal, "poll_add(%p)" NL, commac); | |
180 | ||
181 | /* starts disabled */ | |
182 | set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); | |
183 | ||
184 | list_add_tail(&commac->poll_list, &mal->poll_list); | |
185 | ||
186 | spin_unlock_irqrestore(&mal->lock, flags); | |
187 | } | |
188 | ||
189 | void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac) | |
190 | { | |
191 | unsigned long flags; | |
192 | ||
193 | spin_lock_irqsave(&mal->lock, flags); | |
194 | ||
195 | MAL_DBG(mal, "poll_del(%p)" NL, commac); | |
196 | ||
197 | list_del(&commac->poll_list); | |
198 | ||
199 | spin_unlock_irqrestore(&mal->lock, flags); | |
200 | } | |
201 | ||
/* synchronized by mal_poll() */
/*
 * Unmask the end-of-packet (EOB) interrupt by setting MAL_CFG_EOPIE in
 * the MAL configuration register (read-modify-write of a DCR).
 */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}
210 | ||
/* synchronized by NAPI state */
/*
 * Mask the end-of-packet (EOB) interrupt by clearing MAL_CFG_EOPIE.
 * Called when a NAPI poll is scheduled so EOB IRQs stay quiet until
 * mal_poll() re-enables them on completion.
 */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}
219 | ||
/*
 * System-error interrupt handler.
 *
 * Reads and acknowledges MAL_ESR (written back to clear), then decodes
 * the error in priority order: descriptor errors are ignored here
 * (TXDE/RXDE will fire separately), PLB errors are logged, anything
 * else valid is reported as an OPB error.  Always returns IRQ_HANDLED.
 */
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}
261 | ||
/*
 * Schedule the shared NAPI poller if it is not already scheduled.
 * On a successful prep, EOB interrupts are masked first so no further
 * EOB IRQs fire while the poll is pending; mal_poll() unmasks them
 * again when it completes.
 */
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}
271 | ||
/*
 * TX end-of-buffer interrupt: kick the NAPI poller, then acknowledge
 * the channel bits we observed by writing them back to TXEOBISR
 * (presumably write-one-to-clear — per MAL register semantics).
 */
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Parts with MAL_FTR_CLEAR_ICINTSTAT also latch the event in the
	 * SDR0 interrupt-coalescing status register; clear it there too. */
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}
291 | ||
/*
 * RX end-of-buffer interrupt: kick the NAPI poller, then acknowledge
 * the channel bits we observed by writing them back to RXEOBISR
 * (presumably write-one-to-clear — per MAL register semantics).
 */
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Parts with MAL_FTR_CLEAR_ICINTSTAT also latch the event in the
	 * SDR0 interrupt-coalescing status register; clear it there too. */
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}
311 | ||
/*
 * TX descriptor-error interrupt: acknowledge TXDEIR by writing the
 * observed bits back, and log the error (rate-limited).  No recovery
 * is attempted here.
 */
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}
328 | ||
329 | static irqreturn_t mal_rxde(int irq, void *dev_instance) | |
330 | { | |
331 | struct mal_instance *mal = dev_instance; | |
332 | struct list_head *l; | |
333 | ||
334 | u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); | |
335 | ||
336 | MAL_DBG(mal, "rxde %08x" NL, deir); | |
337 | ||
338 | list_for_each(l, &mal->list) { | |
339 | struct mal_commac *mc = list_entry(l, struct mal_commac, list); | |
340 | if (deir & mc->rx_chan_mask) { | |
341 | set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags); | |
342 | mc->ops->rxde(mc->dev); | |
343 | } | |
344 | } | |
345 | ||
346 | mal_schedule_poll(mal); | |
347 | set_mal_dcrn(mal, MAL_RXDEIR, deir); | |
348 | ||
349 | return IRQ_HANDLED; | |
350 | } | |
351 | ||
fbcc4bac JB |
352 | static irqreturn_t mal_int(int irq, void *dev_instance) |
353 | { | |
354 | struct mal_instance *mal = dev_instance; | |
355 | u32 esr = get_mal_dcrn(mal, MAL_ESR); | |
356 | ||
357 | if (esr & MAL_ESR_EVB) { | |
358 | /* descriptor error */ | |
359 | if (esr & MAL_ESR_DE) { | |
360 | if (esr & MAL_ESR_CIDT) | |
361 | return mal_rxde(irq, dev_instance); | |
362 | else | |
363 | return mal_txde(irq, dev_instance); | |
364 | } else { /* SERR */ | |
365 | return mal_serr(irq, dev_instance); | |
366 | } | |
367 | } | |
368 | return IRQ_HANDLED; | |
369 | } | |
370 | ||
/*
 * Temporarily exclude a commac from mal_poll() servicing.
 *
 * Spins (sleeping 1ms per retry) until it wins the POLL_DISABLED bit —
 * only one caller may have a channel disabled at a time — then waits
 * for any in-flight poll to finish so the caller can safely touch the
 * channel's rings.  Paired with mal_poll_enable().
 */
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}
380 | ||
/*
 * Re-include a commac in mal_poll() servicing.  The smp_wmb() makes
 * the caller's ring updates visible before the poller can observe the
 * cleared POLL_DISABLED bit.
 */
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}
393 | ||
/*
 * NAPI poll callback shared by all EMACs attached to this MAL.
 *
 * Runs TX completion for every registered poller, then distributes the
 * RX budget across them; channels marked MAL_COMMAC_POLL_DISABLED are
 * skipped (see mal_poll_disable()).  If the budget is exhausted the
 * poll stays scheduled ("more_work").  Otherwise the poll is completed
 * and EOB interrupts re-enabled, after which a final pass checks for
 * packets that raced with completion ("rotting" packets) and, if found,
 * re-arms NAPI.  Returns the number of RX packets processed.
 */
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			/* A packet slipped in after completion: re-arm NAPI
			 * (masking EOB again) and loop while budget remains */
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}
462 | ||
463 | static void mal_reset(struct mal_instance *mal) | |
464 | { | |
465 | int n = 10; | |
466 | ||
467 | MAL_DBG(mal, "reset" NL); | |
468 | ||
469 | set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); | |
470 | ||
471 | /* Wait for reset to complete (1 system clock) */ | |
472 | while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) | |
473 | --n; | |
474 | ||
475 | if (unlikely(!n)) | |
476 | printk(KERN_ERR "mal%d: reset timeout\n", mal->index); | |
477 | } | |
478 | ||
479 | int mal_get_regs_len(struct mal_instance *mal) | |
480 | { | |
481 | return sizeof(struct emac_ethtool_regs_subhdr) + | |
482 | sizeof(struct mal_regs); | |
483 | } | |
484 | ||
/*
 * Snapshot all MAL registers into an ethtool dump buffer.
 *
 * Writes a sub-header (version/index) followed by a struct mal_regs
 * filled from the DCRs, including per-channel TX/RX pointer and RX
 * buffer-size registers.  Returns a pointer just past the written
 * data so callers can chain dumps.
 */
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}
518 | ||
/*
 * Probe one MAL instance described by a device-tree node.
 *
 * Reads channel counts and DCR/IRQ resources from the OF node, maps
 * the DCR range, resets and configures the hardware, allocates the
 * shared buffer-descriptor ring (coherent DMA), requests the five
 * MAL interrupts (or the shared-line variant on 405EZ-style parts)
 * and finally enables SERR/EOB interrupts.  Error paths unwind in
 * reverse acquisition order via the fail* labels.
 */
static int mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal)
		return -ENOMEM;

	mal->index = index;
	mal->ofdev = ofdev;
	/* "ibm,mcmal2" nodes are version-2 MALs, everything else version 1 */
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	/* 405EZ needs both the ICINTSTAT-clear and common-error-IRQ
	 * features compiled in; refuse to probe otherwise */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	/* Parts with a common error interrupt share one line for
	 * SERR/TXDE/RXDE; others have dedicated lines at indices 3/4 */
	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	/* NAPI needs a netdev; use a dummy one shared by all commacs */
	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (mal->bd_virt == NULL) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	/* Point each channel's descriptor pointer register at its slice
	 * of the shared ring (TX channels first, then RX) */
	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	platform_set_drvdata(ofdev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}
722 | ||
/*
 * Tear down one MAL instance: quiesce NAPI, warn if any commac is
 * still registered (clients should have unregistered first), release
 * the five IRQs, reset the hardware, and free the BD ring.
 */
static int mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = platform_get_drvdata(ofdev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list))
		/* This is *very* bad */
		WARN(1, KERN_EMERG
		     "mal%d: commac list is not empty on remove!\n",
		     mal->index);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	/* Same size computation as the allocation in mal_probe() */
	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}
757 | ||
/* Device-tree match table: MAL v1/v2 compatibles, plus device_type
 * entries kept for backward compatibility with old device trees. */
static struct of_device_id mal_platform_match[] =
{
	{
		.compatible = "ibm,mcmal",
	},
	{
		.compatible = "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal",
	},
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal2",
	},
	{},
};
777 | ||
/* Platform driver binding mal_probe()/mal_remove() to matching
 * device-tree nodes. */
static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.owner = THIS_MODULE,
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};
787 | ||
788 | int __init mal_init(void) | |
789 | { | |
74888760 | 790 | return platform_driver_register(&mal_of_driver); |
1d3bb996 DG |
791 | } |
792 | ||
793 | void mal_exit(void) | |
794 | { | |
74888760 | 795 | platform_driver_unregister(&mal_of_driver); |
1d3bb996 | 796 | } |