Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/drivers/net/irda/sa1100_ir.c | |
3 | * | |
4 | * Copyright (C) 2000-2001 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * Infra-red driver for the StrongARM SA1100 embedded microprocessor | |
11 | * | |
12 | * Note that we don't have to worry about the SA1111's DMA bugs in here, | |
13 | * so we use the straight forward dma_map_* functions with a null pointer. | |
14 | * | |
15 | * This driver takes one kernel command line parameter, sa1100ir=, with | |
16 | * the following options: | |
17 | * max_rate:baudrate - set the maximum baud rate | |
15877e9c | 18 | * power_level:level - set the transmitter power level |
1da177e4 LT |
19 | * tx_lpm:0|1 - set transmit low power mode |
20 | */ | |
1da177e4 LT |
21 | #include <linux/module.h> |
22 | #include <linux/moduleparam.h> | |
23 | #include <linux/types.h> | |
24 | #include <linux/init.h> | |
25 | #include <linux/errno.h> | |
26 | #include <linux/netdevice.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/rtnetlink.h> | |
29 | #include <linux/interrupt.h> | |
30 | #include <linux/delay.h> | |
d052d1be | 31 | #include <linux/platform_device.h> |
1da177e4 | 32 | #include <linux/dma-mapping.h> |
bf95154f RK |
33 | #include <linux/dmaengine.h> |
34 | #include <linux/sa11x0-dma.h> | |
1da177e4 LT |
35 | |
36 | #include <net/irda/irda.h> | |
37 | #include <net/irda/wrapper.h> | |
38 | #include <net/irda/irda_device.h> | |
39 | ||
a09e64fb | 40 | #include <mach/hardware.h> |
1da177e4 LT |
41 | #include <asm/mach/irda.h> |
42 | ||
/* Module parameters, settable via the sa1100ir= kernel command line. */
static int power_level = 3;	/* transmitter power level: 0 (off) .. 3 (maximum range) */
static int tx_lpm;		/* non-zero selects transmit low power mode (UTCR4_Z1_6us) */
static int max_rate = 4000000;	/* highest baud rate advertised in the QoS bits */
46 | ||
/*
 * Per-direction DMA state: the skb currently owned by the hardware, its
 * single-entry scatterlist, and the dmaengine channel driving it.
 */
struct sa1100_buf {
	struct device		*dev;	/* device used for dma_map_sg()/dma_unmap_sg() */
	struct sk_buff		*skb;	/* packet under DMA, or NULL when idle */
	struct scatterlist	sg;	/* single-entry scatterlist covering the data */
	struct dma_chan		*chan;	/* dmaengine channel for this direction */
	dma_cookie_t		cookie;	/* cookie of the in-flight descriptor */
};
54 | ||
/* Driver private state, stored in the net_device private area. */
struct sa1100_irda {
	unsigned char		utcr4;		/* cached UTCR4 value programmed at startup */
	unsigned char		power;		/* current transmitter power level */
	unsigned char		open;		/* non-zero once ndo_open has completed */

	int			speed;		/* current link speed in baud */
	int			newspeed;	/* pending speed change (0 = none) */

	struct sa1100_buf	dma_rx;		/* FIR receive DMA state */
	struct sa1100_buf	dma_tx;		/* SIR/FIR transmit DMA state */

	struct device		*dev;
	struct irda_platform_data *pdata;	/* board hooks: startup/shutdown/set_power/set_speed */
	struct irlap_cb		*irlap;		/* IrLAP instance opened in ndo_open */
	struct qos_info		qos;

	iobuff_t		tx_buff;	/* HP-SIR async wrap buffer */
	iobuff_t		rx_buff;	/* HP-SIR async unwrap buffer */

	/* Speed-dependent handlers, switched by sa1100_irda_set_speed(). */
	int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
	irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
};
77 | ||
static int sa1100_irda_set_speed(struct sa1100_irda *, int);

/* True when running in 4Mbps FIR mode rather than HP-SIR. */
#define IS_FIR(si)		((si)->speed >= 4000000)

/* Largest FIR frame we accept; the receive skb is this plus one byte. */
#define HPSIR_MAX_RXLEN		2047
83 | ||
/*
 * dmaengine slave configurations for the three transfer types.
 * SIR transmit feeds the UART data register (UTDR) one byte at a time;
 * FIR transmit/receive use the HSSP data register (HSDR).
 */
static struct dma_slave_config sa1100_irda_sir_tx = {
	.direction	= DMA_TO_DEVICE,
	.dst_addr	= __PREG(Ser2UTDR),
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_maxburst	= 4,
};

static struct dma_slave_config sa1100_irda_fir_rx = {
	.direction	= DMA_FROM_DEVICE,
	.src_addr	= __PREG(Ser2HSDR),
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_maxburst	= 8,
};

static struct dma_slave_config sa1100_irda_fir_tx = {
	.direction	= DMA_TO_DEVICE,
	.dst_addr	= __PREG(Ser2HSDR),
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_maxburst	= 8,
};
104 | ||
105 | static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf) | |
106 | { | |
107 | struct dma_chan *chan = buf->chan; | |
108 | struct dma_tx_state state; | |
109 | enum dma_status status; | |
110 | ||
111 | status = chan->device->device_tx_status(chan, buf->cookie, &state); | |
112 | if (status != DMA_PAUSED) | |
113 | return 0; | |
114 | ||
115 | return sg_dma_len(&buf->sg) - state.residue; | |
116 | } | |
117 | ||
118 | static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf, | |
119 | const char *name, struct dma_slave_config *cfg) | |
120 | { | |
121 | dma_cap_mask_t m; | |
122 | int ret; | |
123 | ||
124 | dma_cap_zero(m); | |
125 | dma_cap_set(DMA_SLAVE, m); | |
126 | ||
127 | buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name); | |
128 | if (!buf->chan) { | |
129 | dev_err(dev, "unable to request DMA channel for %s\n", | |
130 | name); | |
131 | return -ENOENT; | |
132 | } | |
133 | ||
134 | ret = dmaengine_slave_config(buf->chan, cfg); | |
135 | if (ret) | |
136 | dev_warn(dev, "DMA slave_config for %s returned %d\n", | |
137 | name, ret); | |
138 | ||
139 | buf->dev = buf->chan->device->dev; | |
140 | ||
141 | return 0; | |
142 | } | |
143 | ||
144 | static void sa1100_irda_dma_start(struct sa1100_buf *buf, | |
145 | enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p) | |
146 | { | |
147 | struct dma_async_tx_descriptor *desc; | |
148 | struct dma_chan *chan = buf->chan; | |
149 | ||
d9d54540 | 150 | desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir, |
bf95154f RK |
151 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
152 | if (desc) { | |
153 | desc->callback = cb; | |
154 | desc->callback_param = cb_p; | |
155 | buf->cookie = dmaengine_submit(desc); | |
156 | dma_async_issue_pending(chan); | |
157 | } | |
158 | } | |
159 | ||
1da177e4 LT |
160 | /* |
161 | * Allocate and map the receive buffer, unless it is already allocated. | |
162 | */ | |
163 | static int sa1100_irda_rx_alloc(struct sa1100_irda *si) | |
164 | { | |
885767ca | 165 | if (si->dma_rx.skb) |
1da177e4 LT |
166 | return 0; |
167 | ||
885767ca RK |
168 | si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); |
169 | if (!si->dma_rx.skb) { | |
1da177e4 LT |
170 | printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); |
171 | return -ENOMEM; | |
172 | } | |
173 | ||
174 | /* | |
175 | * Align any IP headers that may be contained | |
176 | * within the frame. | |
177 | */ | |
885767ca | 178 | skb_reserve(si->dma_rx.skb, 1); |
1da177e4 | 179 | |
32273f50 | 180 | sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN); |
3c500a35 | 181 | if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { |
885767ca | 182 | dev_kfree_skb_any(si->dma_rx.skb); |
22f0bf96 RK |
183 | return -ENOMEM; |
184 | } | |
185 | ||
1da177e4 LT |
186 | return 0; |
187 | } | |
188 | ||
/*
 * We want to get here as soon as possible, and get the receiver setup.
 * We use the existing buffer.
 */
static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
{
	if (!si->dma_rx.skb) {
		printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
		return;
	}

	/*
	 * First empty receive FIFO: writing HSCR0 with RXE clear
	 * disables the receiver, flushing its state.
	 */
	Ser2HSCR0 = HSCR0_HSSP;

	/*
	 * Enable the DMA, receiver and receive interrupt.  Any previous
	 * receive descriptor is torn down before a fresh one is issued
	 * on the existing (already mapped) buffer.
	 */
	dmaengine_terminate_all(si->dma_rx.chan);
	sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);

	Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
}
213 | ||
0e888ee3 RK |
214 | static void sa1100_irda_check_speed(struct sa1100_irda *si) |
215 | { | |
216 | if (si->newspeed) { | |
217 | sa1100_irda_set_speed(si, si->newspeed); | |
218 | si->newspeed = 0; | |
219 | } | |
220 | } | |
221 | ||
/*
 * HP-SIR format support.
 */

/*
 * SIR transmit DMA completion callback: unmap and free the sent skb,
 * account it, wait for the UART transmitter to drain, re-enable the
 * receiver and restart the transmit queue.
 */
static void sa1100_irda_sirtxdma_irq(void *id)
{
	struct net_device *dev = id;
	struct sa1100_irda *si = netdev_priv(dev);

	dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
	dev_kfree_skb(si->dma_tx.skb);
	si->dma_tx.skb = NULL;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);

	/* We need to ensure that the transmitter has finished. */
	do
		rmb();
	while (Ser2UTSR1 & UTSR1_TBY);

	/*
	 * Ok, we've finished transmitting.  Now enable the receiver.
	 * Sometimes we get a receive IRQ immediately after a transmit...
	 */
	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
	Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;

	/* Switch to any speed requested while this frame was in flight. */
	sa1100_irda_check_speed(si);

	/* I'm hungry! */
	netif_wake_queue(dev);
}
254 | ||
3d26db13 RK |
255 | static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, |
256 | struct sa1100_irda *si) | |
257 | { | |
258 | si->tx_buff.data = si->tx_buff.head; | |
259 | si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, | |
260 | si->tx_buff.truesize); | |
261 | ||
d138dacb RK |
262 | si->dma_tx.skb = skb; |
263 | sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len); | |
264 | if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { | |
265 | si->dma_tx.skb = NULL; | |
266 | netif_wake_queue(dev); | |
267 | dev->stats.tx_dropped++; | |
268 | return NETDEV_TX_OK; | |
269 | } | |
270 | ||
271 | sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev); | |
272 | ||
3d26db13 | 273 | /* |
d138dacb RK |
274 | * The mean turn-around time is enforced by XBOF padding, |
275 | * so we don't have to do anything special here. | |
3d26db13 | 276 | */ |
d138dacb | 277 | Ser2UTCR3 = UTCR3_TXE; |
3d26db13 RK |
278 | |
279 | return NETDEV_TX_OK; | |
280 | } | |
281 | ||
/*
 * SIR receive/error interrupt handler.  Error bytes are drained first
 * (they may be the only bytes in the FIFO), then any remaining receive
 * data is fed through the HP-SIR async unwrap state machine.
 */
static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
{
	int status;

	status = Ser2UTSR0;

	/*
	 * Deal with any receive errors first.  The bytes in error may be
	 * the only bytes in the receive FIFO, so we do this first.
	 */
	while (status & UTSR0_EIF) {
		int stat, data;

		/* Status must be read before the data byte it describes. */
		stat = Ser2UTSR1;
		data = Ser2UTDR;

		if (stat & (UTSR1_FRE | UTSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & UTSR1_FRE)
				dev->stats.rx_frame_errors++;
			if (stat & UTSR1_ROR)
				dev->stats.rx_fifo_errors++;
		} else
			async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);

		status = Ser2UTSR0;
	}

	/*
	 * We must clear certain bits.
	 */
	Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);

	if (status & UTSR0_RFS) {
		/*
		 * There are at least 4 bytes in the FIFO.  Read 3 bytes
		 * and leave the rest to the block below.
		 */
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
	}

	if (status & (UTSR0_RFS | UTSR0_RID)) {
		/*
		 * Fifo contains more than 1 character.
		 */
		do {
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  Ser2UTDR);
		} while (Ser2UTSR1 & UTSR1_RNE);

	}

	return IRQ_HANDLED;
}
338 | ||
/*
 * FIR format support.
 */

/*
 * FIR transmit DMA completion callback: busy-wait for the frame to leave
 * the HSSP, apply any pending speed change, restart reception, then
 * account and free the transmitted skb and wake the queue.
 */
static void sa1100_irda_firtxdma_irq(void *id)
{
	struct net_device *dev = id;
	struct sa1100_irda *si = netdev_priv(dev);
	struct sk_buff *skb;

	/*
	 * Wait for the transmission to complete.  Unfortunately,
	 * the hardware doesn't give us an interrupt to indicate
	 * "end of frame".
	 */
	do
		rmb();
	while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);

	/*
	 * Clear the transmit underrun bit.
	 */
	Ser2HSSR0 = HSSR0_TUR;

	/*
	 * Do we need to change speed?  Note that we're lazy
	 * here - we don't free the old dma_rx.skb.  We don't need
	 * to allocate a buffer either.
	 */
	sa1100_irda_check_speed(si);

	/*
	 * Start reception.  This disables the transmitter for
	 * us.  This will be using the existing RX buffer.
	 */
	sa1100_irda_rx_dma_start(si);

	/* Account and free the packet. */
	skb = si->dma_tx.skb;
	if (skb) {
		dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
			     DMA_TO_DEVICE);
		dev->stats.tx_packets ++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);
		si->dma_tx.skb = NULL;
	}

	/*
	 * Make sure that the TX queue is available for sending
	 * (for retries).  TX has priority over RX at all times.
	 */
	netif_wake_queue(dev);
}
392 | ||
a6b2ea66 RK |
393 | static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, |
394 | struct sa1100_irda *si) | |
395 | { | |
396 | int mtt = irda_get_mtt(skb); | |
397 | ||
398 | si->dma_tx.skb = skb; | |
32273f50 | 399 | sg_set_buf(&si->dma_tx.sg, skb->data, skb->len); |
3c500a35 | 400 | if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { |
a6b2ea66 RK |
401 | si->dma_tx.skb = NULL; |
402 | netif_wake_queue(dev); | |
403 | dev->stats.tx_dropped++; | |
404 | dev_kfree_skb(skb); | |
405 | return NETDEV_TX_OK; | |
406 | } | |
407 | ||
bf95154f | 408 | sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev); |
a6b2ea66 RK |
409 | |
410 | /* | |
411 | * If we have a mean turn-around time, impose the specified | |
412 | * specified delay. We could shorten this by timing from | |
413 | * the point we received the packet. | |
414 | */ | |
415 | if (mtt) | |
416 | udelay(mtt); | |
417 | ||
6a7f4911 | 418 | Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE; |
a6b2ea66 RK |
419 | |
420 | return NETDEV_TX_OK; | |
421 | } | |
422 | ||
/*
 * FIR receive error/EOF processing.  The RX DMA has been paused by the
 * caller; work out how much it already transferred, then drain the HSSP
 * FIFO byte-by-byte, folding good bytes into the skb.  If an end-of-frame
 * marker is seen the completed frame is passed up the stack; otherwise
 * the buffer is remapped so reception can continue into it.
 */
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
	struct sk_buff *skb = si->dma_rx.skb;
	unsigned int len, stat, data;

	if (!skb) {
		printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
		return;
	}

	/*
	 * Get the current data position.
	 */
	len = sa1100_irda_dma_xferred(&si->dma_rx);
	if (len > HPSIR_MAX_RXLEN)
		len = HPSIR_MAX_RXLEN;
	/* CPU will now write skb->data directly, so unmap first. */
	dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);

	do {
		/*
		 * Read Status, and then Data.
		 */
		stat = Ser2HSSR1;
		rmb();
		data = Ser2HSDR;

		if (stat & (HSSR1_CRE | HSSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & HSSR1_CRE)
				dev->stats.rx_crc_errors++;
			if (stat & HSSR1_ROR)
				dev->stats.rx_frame_errors++;
		} else
			skb->data[len++] = data;

		/*
		 * If we hit the end of frame, there's
		 * no point in continuing.
		 */
		if (stat & HSSR1_EOF)
			break;
	} while (Ser2HSSR0 & HSSR0_EIF);

	if (stat & HSSR1_EOF) {
		/* Frame complete: hand this skb up and detach it from DMA. */
		si->dma_rx.skb = NULL;

		skb_put(skb, len);
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		/*
		 * Before we pass the buffer up, allocate a new one.
		 */
		sa1100_irda_rx_alloc(si);

		netif_rx(skb);
	} else {
		/*
		 * Remap the buffer - it was previously mapped, and we
		 * hope that this succeeds.
		 */
		dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
	}
}
490 | ||
/*
 * We only have to handle RX events here; transmit events go via the TX
 * DMA handler.  We disable RX, process, and the restart RX.
 */
static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
{
	/*
	 * Stop RX DMA.  Pausing (rather than terminating) preserves the
	 * residue so sa1100_irda_dma_xferred() can report progress.
	 */
	dmaengine_pause(si->dma_rx.chan);

	/*
	 * Framing error - we throw away the packet completely.
	 * Clearing RXE flushes the error conditions and data
	 * from the fifo.
	 */
	if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
		dev->stats.rx_errors++;

		if (Ser2HSSR0 & HSSR0_FRE)
			dev->stats.rx_frame_errors++;

		/*
		 * Clear out the DMA...
		 */
		Ser2HSCR0 = HSCR0_HSSP;

		/*
		 * Clear selected status bits now, so we
		 * don't miss them next time around.
		 */
		Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
	}

	/*
	 * Deal with any receive errors.  The any of the lowest
	 * 8 bytes in the FIFO may contain an error.  We must read
	 * them one by one.  The "error" could even be the end of
	 * packet!
	 */
	if (Ser2HSSR0 & HSSR0_EIF)
		sa1100_irda_fir_error(si, dev);

	/*
	 * No matter what happens, we must restart reception.
	 */
	sa1100_irda_rx_dma_start(si);

	return IRQ_HANDLED;
}
541 | ||
a6b2ea66 RK |
542 | /* |
543 | * Set the IrDA communications speed. | |
544 | */ | |
545 | static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) | |
546 | { | |
547 | unsigned long flags; | |
548 | int brd, ret = -EINVAL; | |
549 | ||
550 | switch (speed) { | |
551 | case 9600: case 19200: case 38400: | |
552 | case 57600: case 115200: | |
553 | brd = 3686400 / (16 * speed) - 1; | |
554 | ||
bf95154f | 555 | /* Stop the receive DMA, and configure transmit. */ |
d138dacb | 556 | if (IS_FIR(si)) { |
bf95154f | 557 | dmaengine_terminate_all(si->dma_rx.chan); |
d138dacb RK |
558 | dmaengine_slave_config(si->dma_tx.chan, |
559 | &sa1100_irda_sir_tx); | |
560 | } | |
a6b2ea66 RK |
561 | |
562 | local_irq_save(flags); | |
563 | ||
564 | Ser2UTCR3 = 0; | |
565 | Ser2HSCR0 = HSCR0_UART; | |
566 | ||
567 | Ser2UTCR1 = brd >> 8; | |
568 | Ser2UTCR2 = brd; | |
569 | ||
570 | /* | |
571 | * Clear status register | |
572 | */ | |
573 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; | |
574 | Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; | |
575 | ||
576 | if (si->pdata->set_speed) | |
577 | si->pdata->set_speed(si->dev, speed); | |
578 | ||
579 | si->speed = speed; | |
580 | si->tx_start = sa1100_irda_sir_tx_start; | |
581 | si->irq = sa1100_irda_sir_irq; | |
582 | ||
583 | local_irq_restore(flags); | |
584 | ret = 0; | |
585 | break; | |
586 | ||
587 | case 4000000: | |
d138dacb RK |
588 | if (!IS_FIR(si)) |
589 | dmaengine_slave_config(si->dma_tx.chan, | |
590 | &sa1100_irda_fir_tx); | |
591 | ||
a6b2ea66 RK |
592 | local_irq_save(flags); |
593 | ||
a6b2ea66 | 594 | Ser2HSSR0 = 0xff; |
6a7f4911 | 595 | Ser2HSCR0 = HSCR0_HSSP; |
a6b2ea66 RK |
596 | Ser2UTCR3 = 0; |
597 | ||
598 | si->speed = speed; | |
599 | si->tx_start = sa1100_irda_fir_tx_start; | |
600 | si->irq = sa1100_irda_fir_irq; | |
601 | ||
602 | if (si->pdata->set_speed) | |
603 | si->pdata->set_speed(si->dev, speed); | |
604 | ||
605 | sa1100_irda_rx_alloc(si); | |
606 | sa1100_irda_rx_dma_start(si); | |
607 | ||
608 | local_irq_restore(flags); | |
609 | ||
610 | break; | |
611 | ||
612 | default: | |
613 | break; | |
614 | } | |
615 | ||
616 | return ret; | |
617 | } | |
618 | ||
619 | /* | |
620 | * Control the power state of the IrDA transmitter. | |
621 | * State: | |
622 | * 0 - off | |
623 | * 1 - short range, lowest power | |
624 | * 2 - medium range, medium power | |
625 | * 3 - maximum range, high power | |
626 | * | |
627 | * Currently, only assabet is known to support this. | |
628 | */ | |
629 | static int | |
630 | __sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) | |
631 | { | |
632 | int ret = 0; | |
633 | if (si->pdata->set_power) | |
634 | ret = si->pdata->set_power(si->dev, state); | |
635 | return ret; | |
636 | } | |
637 | ||
638 | static inline int | |
639 | sa1100_set_power(struct sa1100_irda *si, unsigned int state) | |
640 | { | |
641 | int ret; | |
642 | ||
643 | ret = __sa1100_irda_set_power(si, state); | |
644 | if (ret == 0) | |
645 | si->power = state; | |
646 | ||
647 | return ret; | |
648 | } | |
649 | ||
7d12e780 | 650 | static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) |
1da177e4 LT |
651 | { |
652 | struct net_device *dev = dev_id; | |
374f7739 RK |
653 | struct sa1100_irda *si = netdev_priv(dev); |
654 | ||
655 | return si->irq(dev, si); | |
1da177e4 LT |
656 | } |
657 | ||
1da177e4 LT |
658 | static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) |
659 | { | |
4cf1653a | 660 | struct sa1100_irda *si = netdev_priv(dev); |
1da177e4 LT |
661 | int speed = irda_get_next_speed(skb); |
662 | ||
663 | /* | |
664 | * Does this packet contain a request to change the interface | |
665 | * speed? If so, remember it until we complete the transmission | |
666 | * of this frame. | |
667 | */ | |
668 | if (speed != si->speed && speed != -1) | |
669 | si->newspeed = speed; | |
670 | ||
3d26db13 | 671 | /* If this is an empty frame, we can bypass a lot. */ |
1da177e4 | 672 | if (skb->len == 0) { |
0e888ee3 | 673 | sa1100_irda_check_speed(si); |
1da177e4 | 674 | dev_kfree_skb(skb); |
6ed10654 | 675 | return NETDEV_TX_OK; |
1da177e4 LT |
676 | } |
677 | ||
3d26db13 | 678 | netif_stop_queue(dev); |
1da177e4 | 679 | |
3d26db13 RK |
680 | /* We must not already have a skb to transmit... */ |
681 | BUG_ON(si->dma_tx.skb); | |
1da177e4 | 682 | |
3d26db13 | 683 | return si->tx_start(skb, dev, si); |
1da177e4 LT |
684 | } |
685 | ||
686 | static int | |
687 | sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) | |
688 | { | |
689 | struct if_irda_req *rq = (struct if_irda_req *)ifreq; | |
4cf1653a | 690 | struct sa1100_irda *si = netdev_priv(dev); |
1da177e4 LT |
691 | int ret = -EOPNOTSUPP; |
692 | ||
693 | switch (cmd) { | |
694 | case SIOCSBANDWIDTH: | |
695 | if (capable(CAP_NET_ADMIN)) { | |
696 | /* | |
697 | * We are unable to set the speed if the | |
698 | * device is not running. | |
699 | */ | |
700 | if (si->open) { | |
701 | ret = sa1100_irda_set_speed(si, | |
702 | rq->ifr_baudrate); | |
703 | } else { | |
704 | printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n"); | |
705 | ret = 0; | |
706 | } | |
707 | } | |
708 | break; | |
709 | ||
710 | case SIOCSMEDIABUSY: | |
711 | ret = -EPERM; | |
712 | if (capable(CAP_NET_ADMIN)) { | |
713 | irda_device_set_media_busy(dev, TRUE); | |
714 | ret = 0; | |
715 | } | |
716 | break; | |
717 | ||
718 | case SIOCGRECEIVING: | |
719 | rq->ifr_receiving = IS_FIR(si) ? 0 | |
720 | : si->rx_buff.state != OUTSIDE_FRAME; | |
721 | break; | |
722 | ||
723 | default: | |
724 | break; | |
725 | } | |
726 | ||
727 | return ret; | |
728 | } | |
729 | ||
/*
 * Bring up the Ser2 hardware: run the board-specific startup hook,
 * configure the PPC pins, program the UART/HSSP for HP-SIR operation and
 * start at 9600 baud.  On failure the port is disabled and the board
 * hook unwound.  Returns 0 on success or a negative errno.
 */
static int sa1100_irda_startup(struct sa1100_irda *si)
{
	int ret;

	/*
	 * Ensure that the ports for this device are setup correctly.
	 */
	if (si->pdata->startup)	{
		ret = si->pdata->startup(si->dev);
		if (ret)
			return ret;
	}

	/*
	 * Configure PPC for IRDA - we want to drive TXD2 low.
	 * We also want to drive this pin low during sleep.
	 */
	PPSR &= ~PPC_TXD2;
	PSDR &= ~PPC_TXD2;
	PPDR |= PPC_TXD2;

	/*
	 * Enable HP-SIR modulation, and ensure that the port is disabled.
	 */
	Ser2UTCR3 = 0;
	Ser2HSCR0 = HSCR0_UART;
	Ser2UTCR4 = si->utcr4;
	Ser2UTCR0 = UTCR0_8BitData;
	Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;

	/*
	 * Clear status register
	 */
	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;

	ret = sa1100_irda_set_speed(si, si->speed = 9600);
	if (ret) {
		/* Unwind: disable the port and run the board shutdown hook. */
		Ser2UTCR3 = 0;
		Ser2HSCR0 = 0;

		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
	}

	return ret;
}
776 | ||
/* Quiesce the hardware: stop DMA, disable the port, run the board hook. */
static void sa1100_irda_shutdown(struct sa1100_irda *si)
{
	/*
	 * Stop all DMA activity.
	 */
	dmaengine_terminate_all(si->dma_rx.chan);
	dmaengine_terminate_all(si->dma_tx.chan);

	/* Disable the port. */
	Ser2UTCR3 = 0;
	Ser2HSCR0 = 0;

	if (si->pdata->shutdown)
		si->pdata->shutdown(si->dev);
}
792 | ||
/*
 * ndo_open: claim both DMA channels, bring the hardware up at 9600 baud,
 * open an IrLAP instance, install the interrupt handler and start the
 * transmit queue.  Resources are unwound in reverse order on failure.
 */
static int sa1100_irda_start(struct net_device *dev)
{
	struct sa1100_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	/* RX channel starts with the FIR config; TX with the SIR config. */
	err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
				      &sa1100_irda_fir_rx);
	if (err)
		goto err_rx_dma;

	err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
				      &sa1100_irda_sir_tx);
	if (err)
		goto err_tx_dma;

	/*
	 * Setup the serial port for the specified speed.
	 */
	err = sa1100_irda_startup(si);
	if (err)
		goto err_startup;

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "sa1100");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * Now enable the interrupt and start the queue
	 */
	si->open = 1;
	sa1100_set_power(si, power_level);	/* low power mode */

	netif_start_queue(dev);
	return 0;

err_irq:
	irlap_close(si->irlap);
err_irlap:
	si->open = 0;
	sa1100_irda_shutdown(si);
err_startup:
	dma_release_channel(si->dma_tx.chan);
err_tx_dma:
	dma_release_channel(si->dma_rx.chan);
err_rx_dma:
	return err;
}
850 | ||
851 | static int sa1100_irda_stop(struct net_device *dev) | |
852 | { | |
4cf1653a | 853 | struct sa1100_irda *si = netdev_priv(dev); |
ba84525b | 854 | struct sk_buff *skb; |
1da177e4 | 855 | |
374f7739 RK |
856 | netif_stop_queue(dev); |
857 | ||
858 | si->open = 0; | |
1da177e4 LT |
859 | sa1100_irda_shutdown(si); |
860 | ||
861 | /* | |
ba84525b | 862 | * If we have been doing any DMA activity, make sure we |
1da177e4 LT |
863 | * tidy that up cleanly. |
864 | */ | |
ba84525b RK |
865 | skb = si->dma_rx.skb; |
866 | if (skb) { | |
3c500a35 | 867 | dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, |
32273f50 | 868 | DMA_FROM_DEVICE); |
ba84525b | 869 | dev_kfree_skb(skb); |
885767ca | 870 | si->dma_rx.skb = NULL; |
1da177e4 LT |
871 | } |
872 | ||
ba84525b RK |
873 | skb = si->dma_tx.skb; |
874 | if (skb) { | |
3c500a35 | 875 | dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, |
32273f50 | 876 | DMA_TO_DEVICE); |
ba84525b RK |
877 | dev_kfree_skb(skb); |
878 | si->dma_tx.skb = NULL; | |
879 | } | |
880 | ||
1da177e4 LT |
881 | /* Stop IrLAP */ |
882 | if (si->irlap) { | |
883 | irlap_close(si->irlap); | |
884 | si->irlap = NULL; | |
885 | } | |
886 | ||
1da177e4 LT |
887 | /* |
888 | * Free resources | |
889 | */ | |
bf95154f RK |
890 | dma_release_channel(si->dma_tx.chan); |
891 | dma_release_channel(si->dma_rx.chan); | |
1da177e4 LT |
892 | free_irq(dev->irq, dev); |
893 | ||
894 | sa1100_set_power(si, 0); | |
895 | ||
896 | return 0; | |
897 | } | |
898 | ||
899 | static int sa1100_irda_init_iobuf(iobuff_t *io, int size) | |
900 | { | |
901 | io->head = kmalloc(size, GFP_KERNEL | GFP_DMA); | |
902 | if (io->head != NULL) { | |
903 | io->truesize = size; | |
904 | io->in_frame = FALSE; | |
905 | io->state = OUTSIDE_FRAME; | |
906 | io->data = io->head; | |
907 | } | |
908 | return io->head ? 0 : -ENOMEM; | |
909 | } | |
910 | ||
/* net_device callbacks; transmit dispatches via si->tx_start internally. */
static const struct net_device_ops sa1100_irda_netdev_ops = {
	.ndo_open		= sa1100_irda_start,
	.ndo_stop		= sa1100_irda_stop,
	.ndo_start_xmit		= sa1100_irda_hard_xmit,
	.ndo_do_ioctl		= sa1100_irda_ioctl,
};
917 | ||
3ae5eaec | 918 | static int sa1100_irda_probe(struct platform_device *pdev) |
1da177e4 | 919 | { |
1da177e4 LT |
920 | struct net_device *dev; |
921 | struct sa1100_irda *si; | |
922 | unsigned int baudrate_mask; | |
e556fdbd | 923 | int err, irq; |
1da177e4 LT |
924 | |
925 | if (!pdev->dev.platform_data) | |
926 | return -EINVAL; | |
927 | ||
e556fdbd RK |
928 | irq = platform_get_irq(pdev, 0); |
929 | if (irq <= 0) | |
930 | return irq < 0 ? irq : -ENXIO; | |
931 | ||
1da177e4 LT |
932 | err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; |
933 | if (err) | |
934 | goto err_mem_1; | |
935 | err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY; | |
936 | if (err) | |
937 | goto err_mem_2; | |
938 | err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY; | |
939 | if (err) | |
940 | goto err_mem_3; | |
941 | ||
942 | dev = alloc_irdadev(sizeof(struct sa1100_irda)); | |
943 | if (!dev) | |
944 | goto err_mem_4; | |
945 | ||
d3238608 RK |
946 | SET_NETDEV_DEV(dev, &pdev->dev); |
947 | ||
4cf1653a | 948 | si = netdev_priv(dev); |
1da177e4 LT |
949 | si->dev = &pdev->dev; |
950 | si->pdata = pdev->dev.platform_data; | |
951 | ||
32273f50 RK |
952 | sg_init_table(&si->dma_rx.sg, 1); |
953 | sg_init_table(&si->dma_tx.sg, 1); | |
954 | ||
1da177e4 LT |
955 | /* |
956 | * Initialise the HP-SIR buffers | |
957 | */ | |
958 | err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); | |
959 | if (err) | |
960 | goto err_mem_5; | |
04b7fc4d | 961 | err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME); |
1da177e4 LT |
962 | if (err) |
963 | goto err_mem_5; | |
964 | ||
a1de9666 | 965 | dev->netdev_ops = &sa1100_irda_netdev_ops; |
e556fdbd | 966 | dev->irq = irq; |
1da177e4 LT |
967 | |
968 | irda_init_max_qos_capabilies(&si->qos); | |
969 | ||
970 | /* | |
971 | * We support original IRDA up to 115k2. (we don't currently | |
972 | * support 4Mbps). Min Turn Time set to 1ms or greater. | |
973 | */ | |
974 | baudrate_mask = IR_9600; | |
975 | ||
976 | switch (max_rate) { | |
977 | case 4000000: baudrate_mask |= IR_4000000 << 8; | |
978 | case 115200: baudrate_mask |= IR_115200; | |
979 | case 57600: baudrate_mask |= IR_57600; | |
980 | case 38400: baudrate_mask |= IR_38400; | |
981 | case 19200: baudrate_mask |= IR_19200; | |
982 | } | |
983 | ||
984 | si->qos.baud_rate.bits &= baudrate_mask; | |
985 | si->qos.min_turn_time.bits = 7; | |
986 | ||
987 | irda_qos_bits_to_value(&si->qos); | |
988 | ||
989 | si->utcr4 = UTCR4_HPSIR; | |
990 | if (tx_lpm) | |
991 | si->utcr4 |= UTCR4_Z1_6us; | |
992 | ||
993 | /* | |
994 | * Initially enable HP-SIR modulation, and ensure that the port | |
995 | * is disabled. | |
996 | */ | |
997 | Ser2UTCR3 = 0; | |
998 | Ser2UTCR4 = si->utcr4; | |
999 | Ser2HSCR0 = HSCR0_UART; | |
1000 | ||
1001 | err = register_netdev(dev); | |
1002 | if (err == 0) | |
3ae5eaec | 1003 | platform_set_drvdata(pdev, dev); |
1da177e4 LT |
1004 | |
1005 | if (err) { | |
1006 | err_mem_5: | |
1007 | kfree(si->tx_buff.head); | |
1008 | kfree(si->rx_buff.head); | |
1009 | free_netdev(dev); | |
1010 | err_mem_4: | |
1011 | release_mem_region(__PREG(Ser2HSCR2), 0x04); | |
1012 | err_mem_3: | |
1013 | release_mem_region(__PREG(Ser2HSCR0), 0x1c); | |
1014 | err_mem_2: | |
1015 | release_mem_region(__PREG(Ser2UTCR0), 0x24); | |
1016 | } | |
1017 | err_mem_1: | |
1018 | return err; | |
1019 | } | |
1020 | ||
3ae5eaec | 1021 | static int sa1100_irda_remove(struct platform_device *pdev) |
1da177e4 | 1022 | { |
3ae5eaec | 1023 | struct net_device *dev = platform_get_drvdata(pdev); |
1da177e4 LT |
1024 | |
1025 | if (dev) { | |
4cf1653a | 1026 | struct sa1100_irda *si = netdev_priv(dev); |
1da177e4 LT |
1027 | unregister_netdev(dev); |
1028 | kfree(si->tx_buff.head); | |
1029 | kfree(si->rx_buff.head); | |
1030 | free_netdev(dev); | |
1031 | } | |
1032 | ||
1033 | release_mem_region(__PREG(Ser2HSCR2), 0x04); | |
1034 | release_mem_region(__PREG(Ser2HSCR0), 0x1c); | |
1035 | release_mem_region(__PREG(Ser2UTCR0), 0x24); | |
1036 | ||
1037 | return 0; | |
1038 | } | |
1039 | ||
cbe1d24f RK |
1040 | #ifdef CONFIG_PM |
1041 | /* | |
1042 | * Suspend the IrDA interface. | |
1043 | */ | |
1044 | static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) | |
1045 | { | |
1046 | struct net_device *dev = platform_get_drvdata(pdev); | |
1047 | struct sa1100_irda *si; | |
1048 | ||
1049 | if (!dev) | |
1050 | return 0; | |
1051 | ||
1052 | si = netdev_priv(dev); | |
1053 | if (si->open) { | |
1054 | /* | |
1055 | * Stop the transmit queue | |
1056 | */ | |
1057 | netif_device_detach(dev); | |
1058 | disable_irq(dev->irq); | |
1059 | sa1100_irda_shutdown(si); | |
1060 | __sa1100_irda_set_power(si, 0); | |
1061 | } | |
1062 | ||
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | /* | |
1067 | * Resume the IrDA interface. | |
1068 | */ | |
1069 | static int sa1100_irda_resume(struct platform_device *pdev) | |
1070 | { | |
1071 | struct net_device *dev = platform_get_drvdata(pdev); | |
1072 | struct sa1100_irda *si; | |
1073 | ||
1074 | if (!dev) | |
1075 | return 0; | |
1076 | ||
1077 | si = netdev_priv(dev); | |
1078 | if (si->open) { | |
1079 | /* | |
1080 | * If we missed a speed change, initialise at the new speed | |
1081 | * directly. It is debatable whether this is actually | |
1082 | * required, but in the interests of continuing from where | |
1083 | * we left off it is desirable. The converse argument is | |
1084 | * that we should re-negotiate at 9600 baud again. | |
1085 | */ | |
1086 | if (si->newspeed) { | |
1087 | si->speed = si->newspeed; | |
1088 | si->newspeed = 0; | |
1089 | } | |
1090 | ||
1091 | sa1100_irda_startup(si); | |
1092 | __sa1100_irda_set_power(si, si->power); | |
1093 | enable_irq(dev->irq); | |
1094 | ||
1095 | /* | |
1096 | * This automatically wakes up the queue | |
1097 | */ | |
1098 | netif_device_attach(dev); | |
1099 | } | |
1100 | ||
1101 | return 0; | |
1102 | } | |
1103 | #else | |
1104 | #define sa1100_irda_suspend NULL | |
1105 | #define sa1100_irda_resume NULL | |
1106 | #endif | |
1107 | ||
/*
 * Platform driver glue: binds to the "sa11x0-ir" platform device.
 * suspend/resume are #defined to NULL above when CONFIG_PM is off.
 */
static struct platform_driver sa1100ir_driver = {
	.probe		= sa1100_irda_probe,
	.remove		= sa1100_irda_remove,
	.suspend	= sa1100_irda_suspend,
	.resume		= sa1100_irda_resume,
	.driver		= {
		.name	= "sa11x0-ir",
		.owner	= THIS_MODULE,
	},
};
1118 | ||
1119 | static int __init sa1100_irda_init(void) | |
1120 | { | |
1121 | /* | |
1122 | * Limit power level a sensible range. | |
1123 | */ | |
1124 | if (power_level < 1) | |
1125 | power_level = 1; | |
1126 | if (power_level > 3) | |
1127 | power_level = 3; | |
1128 | ||
3ae5eaec | 1129 | return platform_driver_register(&sa1100ir_driver); |
1da177e4 LT |
1130 | } |
1131 | ||
/* Module unload: deregister the platform driver. */
static void __exit sa1100_irda_exit(void)
{
	platform_driver_unregister(&sa1100ir_driver);
}
1136 | ||
module_init(sa1100_irda_init);
module_exit(sa1100_irda_exit);

/* Module parameters; meanings documented by MODULE_PARM_DESC below. */
module_param(power_level, int, 0);
module_param(tx_lpm, int, 0);
module_param(max_rate, int, 0);

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
MODULE_ALIAS("platform:sa11x0-ir");