Commit | Line | Data |
---|---|---|
1d3bb996 DG |
1 | /* |
2 | * drivers/net/ibm_newemac/mal.c | |
3 | * | |
4 | * Memory Access Layer (MAL) support | |
5 | * | |
6 | * Copyright (c) 2004, 2005 Zultys Technologies. | |
7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
8 | * | |
9 | * Based on original work by | |
10 | * Benjamin Herrenschmidt <benh@kernel.crashing.org>, | |
11 | * David Gibson <hermes@gibson.dropbear.id.au>, | |
12 | * | |
13 | * Armin Kuster <akuster@mvista.com> | |
14 | * Copyright 2002 MontaVista Softare Inc. | |
15 | * | |
16 | * This program is free software; you can redistribute it and/or modify it | |
17 | * under the terms of the GNU General Public License as published by the | |
18 | * Free Software Foundation; either version 2 of the License, or (at your | |
19 | * option) any later version. | |
20 | * | |
21 | */ | |
22 | ||
23 | #include <linux/delay.h> | |
24 | ||
25 | #include "core.h" | |
26 | ||
/* Number of MAL instances probed so far; used to assign mal->index. */
static int mal_count;
28 | ||
29 | int __devinit mal_register_commac(struct mal_instance *mal, | |
30 | struct mal_commac *commac) | |
31 | { | |
32 | unsigned long flags; | |
33 | ||
34 | spin_lock_irqsave(&mal->lock, flags); | |
35 | ||
36 | MAL_DBG(mal, "reg(%08x, %08x)" NL, | |
37 | commac->tx_chan_mask, commac->rx_chan_mask); | |
38 | ||
39 | /* Don't let multiple commacs claim the same channel(s) */ | |
40 | if ((mal->tx_chan_mask & commac->tx_chan_mask) || | |
41 | (mal->rx_chan_mask & commac->rx_chan_mask)) { | |
42 | spin_unlock_irqrestore(&mal->lock, flags); | |
43 | printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", | |
44 | mal->index); | |
45 | return -EBUSY; | |
46 | } | |
47 | ||
48 | mal->tx_chan_mask |= commac->tx_chan_mask; | |
49 | mal->rx_chan_mask |= commac->rx_chan_mask; | |
50 | list_add(&commac->list, &mal->list); | |
51 | ||
52 | spin_unlock_irqrestore(&mal->lock, flags); | |
53 | ||
54 | return 0; | |
55 | } | |
56 | ||
/*
 * Release the channels owned by @commac and remove it from the MAL's
 * commac list.  Counterpart of mal_register_commac().
 */
void __devexit mal_unregister_commac(struct mal_instance *mal,
				     struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Give the channels back to the instance-wide masks */
	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
73 | ||
/*
 * Program the RX channel buffer size for @channel.
 *
 * @size must be 16-byte aligned (the MAL_RCBS register holds the size
 * in 16-byte units, hence the >> 4) and no larger than MAL_MAX_RX_SIZE.
 * Returns 0 on success or -EINVAL for a misaligned size.
 */
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}
91 | ||
/*
 * Index of the first TX buffer descriptor for @channel within the
 * shared BD area: TX rings are laid out first, NUM_TX_BUFF per channel.
 */
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}
98 | ||
/*
 * Index of the first RX buffer descriptor for @channel: RX rings follow
 * all TX rings in the shared BD area, NUM_RX_BUFF per channel.
 */
int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
104 | ||
/*
 * Enable TX on @channel.  MAL_TXCASR needs a read-modify-write, so the
 * update is done under mal->lock to avoid racing other channel updates.
 */
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
118 | ||
/*
 * Disable TX on @channel via the channel reset register.  This is a
 * single write (no read-modify-write), hence no locking here.
 */
void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}
125 | ||
/*
 * Enable RX on @channel.  MAL_RXCASR needs a read-modify-write, so the
 * update is done under mal->lock, mirroring mal_enable_tx_channel().
 */
void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
139 | ||
/*
 * Disable RX on @channel via the channel reset register.  Single write,
 * so no lock is taken (same pattern as mal_disable_tx_channel()).
 */
void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
146 | ||
/*
 * Add @commac to the set serviced by mal_poll().  Polling for it starts
 * disabled; the owner enables it later via mal_poll_enable().
 */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
162 | ||
/* Remove @commac from the set serviced by mal_poll(). */
void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
175 | ||
/* Re-arm the end-of-buffer interrupt by setting MAL_CFG_EOPIE.
 * synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}
184 | ||
/* Mask the end-of-buffer interrupt by clearing MAL_CFG_EOPIE.
 * synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}
193 | ||
194 | static irqreturn_t mal_serr(int irq, void *dev_instance) | |
195 | { | |
196 | struct mal_instance *mal = dev_instance; | |
197 | ||
198 | u32 esr = get_mal_dcrn(mal, MAL_ESR); | |
199 | ||
200 | /* Clear the error status register */ | |
201 | set_mal_dcrn(mal, MAL_ESR, esr); | |
202 | ||
203 | MAL_DBG(mal, "SERR %08x" NL, esr); | |
204 | ||
205 | if (esr & MAL_ESR_EVB) { | |
206 | if (esr & MAL_ESR_DE) { | |
207 | /* We ignore Descriptor error, | |
208 | * TXDE or RXDE interrupt will be generated anyway. | |
209 | */ | |
210 | return IRQ_HANDLED; | |
211 | } | |
212 | ||
213 | if (esr & MAL_ESR_PEIN) { | |
214 | /* PLB error, it's probably buggy hardware or | |
215 | * incorrect physical address in BD (i.e. bug) | |
216 | */ | |
217 | if (net_ratelimit()) | |
218 | printk(KERN_ERR | |
219 | "mal%d: system error, " | |
220 | "PLB (ESR = 0x%08x)\n", | |
221 | mal->index, esr); | |
222 | return IRQ_HANDLED; | |
223 | } | |
224 | ||
225 | /* OPB error, it's probably buggy hardware or incorrect | |
226 | * EBC setup | |
227 | */ | |
228 | if (net_ratelimit()) | |
229 | printk(KERN_ERR | |
230 | "mal%d: system error, OPB (ESR = 0x%08x)\n", | |
231 | mal->index, esr); | |
232 | } | |
233 | return IRQ_HANDLED; | |
234 | } | |
235 | ||
/*
 * Arrange for mal_poll() to run: if NAPI isn't already scheduled, mask
 * the EOB interrupt and schedule it; otherwise just log that a poll is
 * already pending.
 */
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}
245 | ||
246 | static irqreturn_t mal_txeob(int irq, void *dev_instance) | |
247 | { | |
248 | struct mal_instance *mal = dev_instance; | |
249 | ||
250 | u32 r = get_mal_dcrn(mal, MAL_TXEOBISR); | |
251 | ||
252 | MAL_DBG2(mal, "txeob %08x" NL, r); | |
253 | ||
254 | mal_schedule_poll(mal); | |
255 | set_mal_dcrn(mal, MAL_TXEOBISR, r); | |
256 | ||
257 | return IRQ_HANDLED; | |
258 | } | |
259 | ||
260 | static irqreturn_t mal_rxeob(int irq, void *dev_instance) | |
261 | { | |
262 | struct mal_instance *mal = dev_instance; | |
263 | ||
264 | u32 r = get_mal_dcrn(mal, MAL_RXEOBISR); | |
265 | ||
266 | MAL_DBG2(mal, "rxeob %08x" NL, r); | |
267 | ||
268 | mal_schedule_poll(mal); | |
269 | set_mal_dcrn(mal, MAL_RXEOBISR, r); | |
270 | ||
271 | return IRQ_HANDLED; | |
272 | } | |
273 | ||
/*
 * TX descriptor-error interrupt: ack the error and report it
 * (rate-limited).  No recovery is attempted here.
 */
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}
290 | ||
291 | static irqreturn_t mal_rxde(int irq, void *dev_instance) | |
292 | { | |
293 | struct mal_instance *mal = dev_instance; | |
294 | struct list_head *l; | |
295 | ||
296 | u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); | |
297 | ||
298 | MAL_DBG(mal, "rxde %08x" NL, deir); | |
299 | ||
300 | list_for_each(l, &mal->list) { | |
301 | struct mal_commac *mc = list_entry(l, struct mal_commac, list); | |
302 | if (deir & mc->rx_chan_mask) { | |
303 | set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags); | |
304 | mc->ops->rxde(mc->dev); | |
305 | } | |
306 | } | |
307 | ||
308 | mal_schedule_poll(mal); | |
309 | set_mal_dcrn(mal, MAL_RXDEIR, deir); | |
310 | ||
311 | return IRQ_HANDLED; | |
312 | } | |
313 | ||
/*
 * Exclude @commac from polling and wait until any in-flight poll has
 * finished.  May sleep; must be called from process context.
 */
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller. */
	napi_disable(&mal->napi);
}
323 | ||
/*
 * Re-include @commac in polling.  The smp_wmb() orders the caller's
 * prior state updates before the flag clear that makes them visible to
 * the poller.
 */
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	// XXX might want to kick a poll now...
}
331 | ||
59e90b2d | 332 | static int mal_poll(struct napi_struct *napi, int budget) |
1d3bb996 | 333 | { |
59e90b2d | 334 | struct mal_instance *mal = container_of(napi, struct mal_instance, napi); |
1d3bb996 | 335 | struct list_head *l; |
59e90b2d | 336 | int received = 0; |
1d3bb996 DG |
337 | unsigned long flags; |
338 | ||
339 | MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, | |
340 | rx_work_limit); | |
341 | again: | |
342 | /* Process TX skbs */ | |
343 | list_for_each(l, &mal->poll_list) { | |
344 | struct mal_commac *mc = | |
345 | list_entry(l, struct mal_commac, poll_list); | |
346 | mc->ops->poll_tx(mc->dev); | |
347 | } | |
348 | ||
349 | /* Process RX skbs. | |
350 | * | |
351 | * We _might_ need something more smart here to enforce polling | |
352 | * fairness. | |
353 | */ | |
354 | list_for_each(l, &mal->poll_list) { | |
355 | struct mal_commac *mc = | |
356 | list_entry(l, struct mal_commac, poll_list); | |
357 | int n; | |
358 | if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) | |
359 | continue; | |
59e90b2d | 360 | n = mc->ops->poll_rx(mc->dev, budget); |
1d3bb996 DG |
361 | if (n) { |
362 | received += n; | |
59e90b2d RD |
363 | budget -= n; |
364 | if (budget <= 0) | |
365 | goto more_work; // XXX What if this is the last one ? | |
1d3bb996 DG |
366 | } |
367 | } | |
368 | ||
369 | /* We need to disable IRQs to protect from RXDE IRQ here */ | |
370 | spin_lock_irqsave(&mal->lock, flags); | |
59e90b2d | 371 | __napi_complete(napi); |
1d3bb996 DG |
372 | mal_enable_eob_irq(mal); |
373 | spin_unlock_irqrestore(&mal->lock, flags); | |
374 | ||
1d3bb996 DG |
375 | /* Check for "rotting" packet(s) */ |
376 | list_for_each(l, &mal->poll_list) { | |
377 | struct mal_commac *mc = | |
378 | list_entry(l, struct mal_commac, poll_list); | |
379 | if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) | |
380 | continue; | |
381 | if (unlikely(mc->ops->peek_rx(mc->dev) || | |
382 | test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { | |
383 | MAL_DBG2(mal, "rotting packet" NL); | |
59e90b2d | 384 | if (napi_reschedule(napi)) |
1d3bb996 DG |
385 | mal_disable_eob_irq(mal); |
386 | else | |
387 | MAL_DBG2(mal, "already in poll list" NL); | |
388 | ||
59e90b2d | 389 | if (budget > 0) |
1d3bb996 DG |
390 | goto again; |
391 | else | |
392 | goto more_work; | |
393 | } | |
394 | mc->ops->poll_tx(mc->dev); | |
395 | } | |
396 | ||
397 | more_work: | |
59e90b2d RD |
398 | MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received); |
399 | return received; | |
1d3bb996 DG |
400 | } |
401 | ||
402 | static void mal_reset(struct mal_instance *mal) | |
403 | { | |
404 | int n = 10; | |
405 | ||
406 | MAL_DBG(mal, "reset" NL); | |
407 | ||
408 | set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); | |
409 | ||
410 | /* Wait for reset to complete (1 system clock) */ | |
411 | while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) | |
412 | --n; | |
413 | ||
414 | if (unlikely(!n)) | |
415 | printk(KERN_ERR "mal%d: reset timeout\n", mal->index); | |
416 | } | |
417 | ||
/* Size of the register dump produced by mal_dump_regs(): one subheader
 * plus the fixed-size mal_regs snapshot. */
int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}
423 | ||
/*
 * Snapshot all MAL DCRs into @buf for ethtool register dumps.
 * Writes a subheader followed by a struct mal_regs, including the
 * per-channel TX/RX channel-table pointers and RX buffer sizes.
 * Returns the address just past the data written.
 */
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}
457 | ||
/*
 * OF platform probe: allocate and wire up one MAL instance.
 *
 * Steps, each with a matching unwind label on failure:
 *   - allocate the instance, read channel counts from the device tree
 *   - map the DCR window and the five interrupts (TXEOB, RXEOB, SERR,
 *     TXDE, RXDE)
 *   - reset the core and apply the version/board-specific MAL_CFG
 *   - allocate one coherent DMA area holding all TX then RX BD rings
 *     and point each channel's CTPR at its slice
 *   - request the five IRQ handlers, unmask SERR sources and EOB
 * Returns 0 on success or a negative errno.
 */
static int __devinit mal_probe(struct of_device *ofdev,
			       const struct of_device_id *match)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	const u32 *prop;
	u32 cfg;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	/* "ibm,mcmal2" compatibles are version 2, everything else v1 */
	mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	mal->dcr_base = dcr_resource_start(ofdev->node, 0);
	if (mal->dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	/* Interrupt order in the device tree is fixed: EOBs, SERR, DEs */
	mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2);
	mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3);
	mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4);
	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT;
	mal->napi.poll = mal_poll;
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	/* Point each channel's descriptor-table pointer at its ring slice */
	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100);
 fail:
	kfree(mal);

	return err;
}
635 | ||
/*
 * OF platform remove: tear down a MAL instance in the reverse order of
 * mal_probe() — quiesce NAPI, unhook from driver data, free the IRQs,
 * reset the core, then release the BD DMA area and the instance itself.
 * A non-empty commac list here means an EMAC failed to unregister: we
 * warn loudly but proceed with the teardown anyway.
 */
static int __devexit mal_remove(struct of_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	/* Same size computation as the allocation in mal_probe() */
	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}
674 | ||
/* Device-tree match table: current "ibm,mcmal"/"ibm,mcmal2" compatibles
 * plus the older "mcmal-dma" device_type entries for backward compat. */
static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};
694 | ||
/* OF platform driver binding probe/remove to the match table above. */
static struct of_platform_driver mal_of_driver = {
	.name = "mcmal",
	.match_table = mal_platform_match,

	.probe = mal_probe,
	.remove = mal_remove,
};
702 | ||
/* Register the MAL OF driver; called from the EMAC core's module init. */
int __init mal_init(void)
{
	return of_register_platform_driver(&mal_of_driver);
}
707 | ||
/* Unregister the MAL OF driver; counterpart of mal_init(). */
void mal_exit(void)
{
	of_unregister_platform_driver(&mal_of_driver);
}