Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */ |
2 | /* PLIP: A parallel port "network" driver for Linux. */ | |
3 | /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */ | |
4 | /* | |
5 | * Authors: Donald Becker <becker@scyld.com> | |
6 | * Tommy Thorn <thorn@daimi.aau.dk> | |
7 | * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp> | |
8 | * Alan Cox <gw4pts@gw4pts.ampr.org> | |
9 | * Peter Bauer <100136.3530@compuserve.com> | |
10 | * Niibe Yutaka <gniibe@mri.co.jp> | |
11 | * Nimrod Zimerman <zimerman@mailandnews.com> | |
12 | * | |
13 | * Enhancements: | |
14 | * Modularization and ifreq/ifmap support by Alan Cox. | |
15 | * Rewritten by Niibe Yutaka. | |
16 | * parport-sharing awareness code by Philip Blundell. | |
17 | * SMP locking by Niibe Yutaka. | |
18 | * Support for parallel ports with no IRQ (poll mode), | |
6aa20a22 | 19 | * Modifications to use the parallel port API |
1da177e4 LT |
20 | * by Nimrod Zimerman. |
21 | * | |
22 | * Fixes: | |
23 | * Niibe Yutaka | |
24 | * - Module initialization. | |
25 | * - MTU fix. | |
26 | * - Make sure other end is OK, before sending a packet. | |
27 | * - Fix immediate timer problem. | |
28 | * | |
29 | * Al Viro | |
30 | * - Changed {enable,disable}_irq handling to make it work | |
31 | * with new ("stack") semantics. | |
32 | * | |
33 | * This program is free software; you can redistribute it and/or | |
34 | * modify it under the terms of the GNU General Public License | |
35 | * as published by the Free Software Foundation; either version | |
36 | * 2 of the License, or (at your option) any later version. | |
37 | */ | |
38 | ||
39 | /* | |
40 | * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com> | |
41 | * inspired by Russ Nelson's parallel port packet driver. | |
42 | * | |
43 | * NOTE: | |
44 | * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0. | |
45 | * Because of the necessity to communicate to DOS machines with the | |
46 | * Crynwr packet driver, Peter Bauer changed the protocol again | |
47 | * back to original protocol. | |
48 | * | |
49 | * This version follows original PLIP protocol. | |
50 | * So, this PLIP can't communicate the PLIP of Linux v1.0. | |
51 | */ | |
52 | ||
53 | /* | |
54 | * To use with DOS box, please do (Turn on ARP switch): | |
55 | * # ifconfig plip[0-2] arp | |
56 | */ | |
57 | static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"; | |
58 | ||
59 | /* | |
60 | Sources: | |
61 | Ideas and protocols came from Russ Nelson's <nelson@crynwr.com> | |
62 | "parallel.asm" parallel port packet driver. | |
63 | ||
64 | The "Crynwr" parallel port standard specifies the following protocol: | |
65 | Trigger by sending nibble '0x8' (this causes interrupt on other end) | |
66 | count-low octet | |
67 | count-high octet | |
68 | ... data octets | |
69 | checksum octet | |
70 | Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)> | |
71 | <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)> | |
72 | ||
73 | The packet is encapsulated as if it were ethernet. | |
74 | ||
75 | The cable used is a de facto standard parallel null cable -- sold as | |
76 | a "LapLink" cable by various places. You'll need a 12-conductor cable to | |
77 | make one yourself. The wiring is: | |
78 | SLCTIN 17 - 17 | |
79 | GROUND 25 - 25 | |
80 | D0->ERROR 2 - 15 15 - 2 | |
81 | D1->SLCT 3 - 13 13 - 3 | |
82 | D2->PAPOUT 4 - 12 12 - 4 | |
83 | D3->ACK 5 - 10 10 - 5 | |
84 | D4->BUSY 6 - 11 11 - 6 | |
85 | Do not connect the other pins. They are | |
86 | D5,D6,D7 are 7,8,9 | |
87 | STROBE is 1, FEED is 14, INIT is 16 | |
88 | extra grounds are 18,19,20,21,22,23,24 | |
89 | */ | |
90 | ||
91 | #include <linux/module.h> | |
92 | #include <linux/kernel.h> | |
93 | #include <linux/types.h> | |
94 | #include <linux/fcntl.h> | |
95 | #include <linux/interrupt.h> | |
96 | #include <linux/string.h> | |
97 | #include <linux/if_ether.h> | |
98 | #include <linux/in.h> | |
99 | #include <linux/errno.h> | |
100 | #include <linux/delay.h> | |
1da177e4 LT |
101 | #include <linux/init.h> |
102 | #include <linux/netdevice.h> | |
103 | #include <linux/etherdevice.h> | |
104 | #include <linux/inetdevice.h> | |
105 | #include <linux/skbuff.h> | |
106 | #include <linux/if_plip.h> | |
107 | #include <linux/workqueue.h> | |
1da177e4 LT |
108 | #include <linux/spinlock.h> |
109 | #include <linux/parport.h> | |
110 | #include <linux/bitops.h> | |
111 | ||
112 | #include <net/neighbour.h> | |
113 | ||
114 | #include <asm/system.h> | |
115 | #include <asm/irq.h> | |
116 | #include <asm/byteorder.h> | |
117 | #include <asm/semaphore.h> | |
118 | ||
119 | /* Maximum number of devices to support. */ | |
120 | #define PLIP_MAX 8 | |
121 | ||
122 | /* Use 0 for production, 1 for verification, >2 for debug */ | |
123 | #ifndef NET_DEBUG | |
124 | #define NET_DEBUG 1 | |
125 | #endif | |
f71e1309 | 126 | static const unsigned int net_debug = NET_DEBUG; |
1da177e4 LT |
127 | |
128 | #define ENABLE(irq) if (irq != -1) enable_irq(irq) | |
129 | #define DISABLE(irq) if (irq != -1) disable_irq(irq) | |
130 | ||
131 | /* In micro second */ | |
132 | #define PLIP_DELAY_UNIT 1 | |
133 | ||
134 | /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */ | |
135 | #define PLIP_TRIGGER_WAIT 500 | |
136 | ||
137 | /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */ | |
138 | #define PLIP_NIBBLE_WAIT 3000 | |
139 | ||
140 | /* Bottom halves */ | |
c4028958 DH |
141 | static void plip_kick_bh(struct work_struct *work); |
142 | static void plip_bh(struct work_struct *work); | |
143 | static void plip_timer_bh(struct work_struct *work); | |
1da177e4 LT |
144 | |
145 | /* Interrupt handler */ | |
7d12e780 | 146 | static void plip_interrupt(int irq, void *dev_id); |
1da177e4 LT |
147 | |
148 | /* Functions for DEV methods */ | |
149 | static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev); | |
150 | static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, | |
151 | unsigned short type, void *daddr, | |
152 | void *saddr, unsigned len); | |
153 | static int plip_hard_header_cache(struct neighbour *neigh, | |
154 | struct hh_cache *hh); | |
155 | static int plip_open(struct net_device *dev); | |
156 | static int plip_close(struct net_device *dev); | |
157 | static struct net_device_stats *plip_get_stats(struct net_device *dev); | |
158 | static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | |
159 | static int plip_preempt(void *handle); | |
160 | static void plip_wakeup(void *handle); | |
77933d72 | 161 | |
1da177e4 LT |
162 | enum plip_connection_state { |
163 | PLIP_CN_NONE=0, | |
164 | PLIP_CN_RECEIVE, | |
165 | PLIP_CN_SEND, | |
166 | PLIP_CN_CLOSING, | |
167 | PLIP_CN_ERROR | |
168 | }; | |
169 | ||
170 | enum plip_packet_state { | |
171 | PLIP_PK_DONE=0, | |
172 | PLIP_PK_TRIGGER, | |
173 | PLIP_PK_LENGTH_LSB, | |
174 | PLIP_PK_LENGTH_MSB, | |
175 | PLIP_PK_DATA, | |
176 | PLIP_PK_CHECKSUM | |
177 | }; | |
178 | ||
179 | enum plip_nibble_state { | |
180 | PLIP_NB_BEGIN, | |
181 | PLIP_NB_1, | |
182 | PLIP_NB_2, | |
183 | }; | |
184 | ||
185 | struct plip_local { | |
186 | enum plip_packet_state state; | |
187 | enum plip_nibble_state nibble; | |
188 | union { | |
189 | struct { | |
190 | #if defined(__LITTLE_ENDIAN) | |
191 | unsigned char lsb; | |
192 | unsigned char msb; | |
193 | #elif defined(__BIG_ENDIAN) | |
194 | unsigned char msb; | |
195 | unsigned char lsb; | |
196 | #else | |
197 | #error "Please fix the endianness defines in <asm/byteorder.h>" | |
198 | #endif | |
199 | } b; | |
200 | unsigned short h; | |
201 | } length; | |
202 | unsigned short byte; | |
203 | unsigned char checksum; | |
204 | unsigned char data; | |
205 | struct sk_buff *skb; | |
206 | }; | |
207 | ||
208 | struct net_local { | |
209 | struct net_device_stats enet_stats; | |
c4028958 | 210 | struct net_device *dev; |
1da177e4 | 211 | struct work_struct immediate; |
c4028958 DH |
212 | struct delayed_work deferred; |
213 | struct delayed_work timer; | |
1da177e4 LT |
214 | struct plip_local snd_data; |
215 | struct plip_local rcv_data; | |
216 | struct pardevice *pardev; | |
217 | unsigned long trigger; | |
218 | unsigned long nibble; | |
219 | enum plip_connection_state connection; | |
220 | unsigned short timeout_count; | |
221 | int is_deferred; | |
222 | int port_owner; | |
223 | int should_relinquish; | |
224 | int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev, | |
225 | unsigned short type, void *daddr, | |
226 | void *saddr, unsigned len); | |
227 | int (*orig_hard_header_cache)(struct neighbour *neigh, | |
228 | struct hh_cache *hh); | |
229 | spinlock_t lock; | |
230 | atomic_t kill_timer; | |
231 | struct semaphore killed_timer_sem; | |
232 | }; | |
77933d72 JJ |
233 | |
234 | static inline void enable_parport_interrupts (struct net_device *dev) | |
1da177e4 LT |
235 | { |
236 | if (dev->irq != -1) | |
237 | { | |
238 | struct parport *port = | |
239 | ((struct net_local *)dev->priv)->pardev->port; | |
240 | port->ops->enable_irq (port); | |
241 | } | |
242 | } | |
243 | ||
77933d72 | 244 | static inline void disable_parport_interrupts (struct net_device *dev) |
1da177e4 LT |
245 | { |
246 | if (dev->irq != -1) | |
247 | { | |
248 | struct parport *port = | |
249 | ((struct net_local *)dev->priv)->pardev->port; | |
250 | port->ops->disable_irq (port); | |
251 | } | |
252 | } | |
253 | ||
77933d72 | 254 | static inline void write_data (struct net_device *dev, unsigned char data) |
1da177e4 LT |
255 | { |
256 | struct parport *port = | |
257 | ((struct net_local *)dev->priv)->pardev->port; | |
258 | ||
259 | port->ops->write_data (port, data); | |
260 | } | |
261 | ||
77933d72 | 262 | static inline unsigned char read_status (struct net_device *dev) |
1da177e4 LT |
263 | { |
264 | struct parport *port = | |
265 | ((struct net_local *)dev->priv)->pardev->port; | |
266 | ||
267 | return port->ops->read_status (port); | |
268 | } | |
77933d72 | 269 | |
1da177e4 LT |
270 | /* Entry point of PLIP driver. |
271 | Probe the hardware, and register/initialize the driver. | |
272 | ||
273 | PLIP is rather weird, because of the way it interacts with the parport | |
274 | system. It is _not_ initialised from Space.c. Instead, plip_init() | |
275 | is called, and that function makes up a "struct net_device" for each port, and | |
276 | then calls us here. | |
277 | ||
278 | */ | |
/* Initialize a freshly allocated PLIP net_device: install the driver's
   device methods, wrap the ethernet hard_header/hard_header_cache ops
   (saving the originals in net_local so the wrappers can chain to them),
   and set up the work structures that implement the bottom halves. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->hard_start_xmit	 = plip_tx_packet;
	dev->open		 = plip_open;
	dev->stop		 = plip_close;
	dev->get_stats 		 = plip_get_stats;
	dev->do_ioctl		 = plip_ioctl;
	dev->header_cache_update = NULL;
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	/* Locally assigned dummy MAC; PLIP is point-to-point so the
	   address only needs to be non-zero, not unique. */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	/* Set the private structure */
	nl->orig_hard_header    = dev->hard_header;
	dev->hard_header        = plip_hard_header;

	nl->orig_hard_header_cache = dev->hard_header_cache;
	dev->hard_header_cache     = plip_hard_header_cache;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* Poll-mode ports (no IRQ) use a periodic timer work item
	   in place of the parallel-port interrupt. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
77933d72 | 318 | |
1da177e4 LT |
319 | /* Bottom half handler for the delayed request. |
320 | This routine is kicked by do_timer(). | |
321 | Request `plip_bh' to be invoked. */ | |
/* Bottom half handler for the delayed request.
   This routine is kicked by do_timer().
   Request `plip_bh' to be invoked.

   Runs as the `deferred' delayed-work item; if plip_bh asked to be
   re-run later (is_deferred set), reschedule it immediately. */
static void
plip_kick_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, deferred.work);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}
331 | ||
332 | /* Forward declarations of internal routines */ | |
333 | static int plip_none(struct net_device *, struct net_local *, | |
334 | struct plip_local *, struct plip_local *); | |
335 | static int plip_receive_packet(struct net_device *, struct net_local *, | |
336 | struct plip_local *, struct plip_local *); | |
337 | static int plip_send_packet(struct net_device *, struct net_local *, | |
338 | struct plip_local *, struct plip_local *); | |
339 | static int plip_connection_close(struct net_device *, struct net_local *, | |
340 | struct plip_local *, struct plip_local *); | |
341 | static int plip_error(struct net_device *, struct net_local *, | |
342 | struct plip_local *, struct plip_local *); | |
343 | static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, | |
344 | struct plip_local *snd, | |
345 | struct plip_local *rcv, | |
346 | int error); | |
347 | ||
348 | #define OK 0 | |
349 | #define TIMEOUT 1 | |
350 | #define ERROR 2 | |
351 | #define HS_TIMEOUT 3 | |
352 | ||
353 | typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, | |
354 | struct plip_local *snd, struct plip_local *rcv); | |
355 | ||
f71e1309 | 356 | static const plip_func connection_state_table[] = |
1da177e4 LT |
357 | { |
358 | plip_none, | |
359 | plip_receive_packet, | |
360 | plip_send_packet, | |
361 | plip_connection_close, | |
362 | plip_error | |
363 | }; | |
364 | ||
365 | /* Bottom half handler of PLIP. */ | |
/* Bottom half handler of PLIP.
   Dispatches to the handler for the current connection state via
   connection_state_table.  If the handler (or the subsequent timeout
   handler) does not return OK, defer: mark is_deferred and retry via
   plip_kick_bh one tick later. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
383 | ||
/* Poll-mode substitute for the parallel-port interrupt: fake an
   interrupt (irq == -1) and re-arm ourselves one tick later, until
   kill_timer is set, at which point signal the waiter and stop. */
static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (-1, nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		/* Tell whoever set kill_timer that the timer has stopped. */
		up (&nl->killed_timer_sem);
	}
}
399 | ||
/* Handle a non-OK return from a connection-state handler: retry
   transient timeouts (up to a bounded count), otherwise drop the
   in-flight packet(s), shut interrupts off and enter PLIP_CN_ERROR.
   Always returns TIMEOUT on the error path, OK only when a receive
   was legitimately interrupted by a send collision. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries (10) than
			   mid-packet timeouts (3). */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR from send is remapped so the IRQ balancing
			   below (see comment above) treats it like HS_TIMEOUT. */
			error = HS_TIMEOUT;
		nl->enet_stats.tx_errors++;
		nl->enet_stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		nl->enet_stats.rx_dropped++;
	}
	/* Abort both directions and free any half-transferred packets. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	/* Only the early-send paths (HS_TIMEOUT, including remapped ERROR)
	   still have the IRQ enabled; balance it exactly once. */
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
77933d72 | 475 | |
1da177e4 LT |
/* Connection-state handler for PLIP_CN_NONE: nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
482 | ||
483 | /* PLIP_RECEIVE --- receive a byte(two nibbles) | |
484 | Returns OK on success, TIMEOUT on timeout */ | |
77933d72 | 485 | static inline int |
1da177e4 LT |
486 | plip_receive(unsigned short nibble_timeout, struct net_device *dev, |
487 | enum plip_nibble_state *ns_p, unsigned char *data_p) | |
488 | { | |
489 | unsigned char c0, c1; | |
490 | unsigned int cx; | |
491 | ||
492 | switch (*ns_p) { | |
493 | case PLIP_NB_BEGIN: | |
494 | cx = nibble_timeout; | |
495 | while (1) { | |
496 | c0 = read_status(dev); | |
497 | udelay(PLIP_DELAY_UNIT); | |
498 | if ((c0 & 0x80) == 0) { | |
499 | c1 = read_status(dev); | |
500 | if (c0 == c1) | |
501 | break; | |
502 | } | |
503 | if (--cx == 0) | |
504 | return TIMEOUT; | |
505 | } | |
506 | *data_p = (c0 >> 3) & 0x0f; | |
507 | write_data (dev, 0x10); /* send ACK */ | |
508 | *ns_p = PLIP_NB_1; | |
509 | ||
510 | case PLIP_NB_1: | |
511 | cx = nibble_timeout; | |
512 | while (1) { | |
513 | c0 = read_status(dev); | |
514 | udelay(PLIP_DELAY_UNIT); | |
515 | if (c0 & 0x80) { | |
516 | c1 = read_status(dev); | |
517 | if (c0 == c1) | |
518 | break; | |
519 | } | |
520 | if (--cx == 0) | |
521 | return TIMEOUT; | |
522 | } | |
523 | *data_p |= (c0 << 1) & 0xf0; | |
524 | write_data (dev, 0x00); /* send ACK */ | |
525 | *ns_p = PLIP_NB_BEGIN; | |
526 | case PLIP_NB_2: | |
527 | break; | |
528 | } | |
529 | return OK; | |
530 | } | |
531 | ||
532 | /* | |
6aa20a22 | 533 | * Determine the packet's protocol ID. The rule here is that we |
1da177e4 LT |
534 | * assume 802.3 if the type field is short enough to be a length. |
535 | * This is normal practice and works for any 'now in use' protocol. | |
536 | * | |
537 | * PLIP is ethernet ish but the daddr might not be valid if unicast. | |
538 | * PLIP fortunately has no bus architecture (its Point-to-point). | |
539 | * | |
540 | * We can't fix the daddr thing as that quirk (more bug) is embedded | |
541 | * in far too many old systems not all even running Linux. | |
542 | */ | |
6aa20a22 | 543 | |
ab611487 | 544 | static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) |
1da177e4 LT |
545 | { |
546 | struct ethhdr *eth; | |
547 | unsigned char *rawp; | |
6aa20a22 | 548 | |
1da177e4 LT |
549 | skb->mac.raw=skb->data; |
550 | skb_pull(skb,dev->hard_header_len); | |
551 | eth = eth_hdr(skb); | |
6aa20a22 | 552 | |
1da177e4 LT |
553 | if(*eth->h_dest&1) |
554 | { | |
555 | if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) | |
556 | skb->pkt_type=PACKET_BROADCAST; | |
557 | else | |
558 | skb->pkt_type=PACKET_MULTICAST; | |
559 | } | |
6aa20a22 | 560 | |
1da177e4 LT |
561 | /* |
562 | * This ALLMULTI check should be redundant by 1.4 | |
563 | * so don't forget to remove it. | |
564 | */ | |
6aa20a22 | 565 | |
1da177e4 LT |
566 | if (ntohs(eth->h_proto) >= 1536) |
567 | return eth->h_proto; | |
6aa20a22 | 568 | |
1da177e4 | 569 | rawp = skb->data; |
6aa20a22 | 570 | |
1da177e4 LT |
571 | /* |
572 | * This is a magic hack to spot IPX packets. Older Novell breaks | |
573 | * the protocol design and runs IPX over 802.3 without an 802.2 LLC | |
574 | * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This | |
575 | * won't work for fault tolerant netware but does for the rest. | |
576 | */ | |
577 | if (*(unsigned short *)rawp == 0xFFFF) | |
578 | return htons(ETH_P_802_3); | |
6aa20a22 | 579 | |
1da177e4 LT |
580 | /* |
581 | * Real 802.2 LLC | |
582 | */ | |
583 | return htons(ETH_P_802_2); | |
584 | } | |
585 | ||
1da177e4 LT |
586 | /* PLIP_RECEIVE_PACKET --- receive a packet */ |
/* PLIP_RECEIVE_PACKET --- receive a packet

   Resumable state machine for PLIP_CN_RECEIVE: rcv->state records how
   far we got, and each switch case deliberately falls through to the
   next so one call makes as much progress as the line allows.  A
   TIMEOUT return leaves the state intact so the bottom half can retry. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a send pending: use the shorter
			   trigger timeout so a collision is resolved by
			   switching back to the send side. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Sum the payload backwards; rcv->byte ends at 0. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			nl->enet_stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->last_rx = jiffies;
		nl->enet_stats.rx_bytes += rcv->length.h;
		nl->enet_stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A send is queued: switch straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
702 | ||
703 | /* PLIP_SEND --- send a byte (two nibbles) | |
704 | Returns OK on success, TIMEOUT when timeout */ | |
77933d72 | 705 | static inline int |
1da177e4 LT |
706 | plip_send(unsigned short nibble_timeout, struct net_device *dev, |
707 | enum plip_nibble_state *ns_p, unsigned char data) | |
708 | { | |
709 | unsigned char c0; | |
710 | unsigned int cx; | |
711 | ||
712 | switch (*ns_p) { | |
713 | case PLIP_NB_BEGIN: | |
714 | write_data (dev, data & 0x0f); | |
715 | *ns_p = PLIP_NB_1; | |
716 | ||
717 | case PLIP_NB_1: | |
718 | write_data (dev, 0x10 | (data & 0x0f)); | |
719 | cx = nibble_timeout; | |
720 | while (1) { | |
721 | c0 = read_status(dev); | |
722 | if ((c0 & 0x80) == 0) | |
723 | break; | |
724 | if (--cx == 0) | |
725 | return TIMEOUT; | |
726 | udelay(PLIP_DELAY_UNIT); | |
727 | } | |
728 | write_data (dev, 0x10 | (data >> 4)); | |
729 | *ns_p = PLIP_NB_2; | |
730 | ||
731 | case PLIP_NB_2: | |
732 | write_data (dev, (data >> 4)); | |
733 | cx = nibble_timeout; | |
734 | while (1) { | |
735 | c0 = read_status(dev); | |
736 | if (c0 & 0x80) | |
737 | break; | |
738 | if (--cx == 0) | |
739 | return TIMEOUT; | |
740 | udelay(PLIP_DELAY_UNIT); | |
741 | } | |
742 | *ns_p = PLIP_NB_BEGIN; | |
743 | return OK; | |
744 | } | |
745 | return OK; | |
746 | } | |
747 | ||
748 | /* PLIP_SEND_PACKET --- send a packet */ | |
/* PLIP_SEND_PACKET --- send a packet

   Resumable state machine for PLIP_CN_SEND; snd->state records progress
   and the switch cases deliberately fall through.  The TRIGGER phase
   races against an incoming receive: if the peer triggered us first,
   back off with a collision and let the receive side win. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must be idle (status lines 0x80) before we trigger. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				nl->enet_stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				/* Re-check after the IRQ is quiesced: the
				   interrupt handler may have flipped us to
				   RECEIVE in the window above. */
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					nl->enet_stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Sum the payload backwards; snd->byte ends at 0. */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		nl->enet_stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		nl->enet_stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
863 | ||
/* Connection-state handler for PLIP_CN_CLOSING: finish the transfer,
   wake the transmit queue, and hand the parallel port back if another
   parport client asked for it (should_relinquish). */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
880 | ||
881 | /* PLIP_ERROR --- wait till other end settled */ | |
/* PLIP_ERROR --- wait till other end settled
   Recover from PLIP_CN_ERROR: once the status lines read idle (0x80),
   reset to PLIP_CN_NONE and re-enable the queue and interrupts;
   otherwise keep polling via the deferred work item. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
77933d72 | 905 | |
1da177e4 LT |
/* Handle the parallel port interrupts. */
static void
plip_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irq (&nl->lock);

	/* A genuine receive trigger leaves 0xc0 in the high status bits;
	   anything else is noise (always possible on IRQ-less polled ports,
	   hence the irq != -1 guard on the debug message). */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irq (&nl->lock);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through --- even while closing, the peer may open
		   a fresh receive connection on us. */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		/* Switch to receiving; the heavy lifting is done by the
		   immediate work item, not in interrupt context. */
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irq(&nl->lock);
}
77933d72 | 955 | |
1da177e4 LT |
/* Queue a packet for transmission.  Called by the network stack; the
   actual nibble transfer runs from the immediate work item. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return 1;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return 1;
		nl->port_owner = 1;
	}

	/* Only one packet in flight: the queue is restarted when the
	   send state machine completes (or on the too-big error below). */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return 1;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	dev->trans_start = jiffies;
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	/* Kick the state machine. */
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return 0;
}
997 | ||
998 | static void | |
999 | plip_rewrite_address(struct net_device *dev, struct ethhdr *eth) | |
1000 | { | |
1001 | struct in_device *in_dev; | |
1002 | ||
1003 | if ((in_dev=dev->ip_ptr) != NULL) { | |
1004 | /* Any address will do - we take the first */ | |
1005 | struct in_ifaddr *ifa=in_dev->ifa_list; | |
1006 | if (ifa != NULL) { | |
1007 | memcpy(eth->h_source, dev->dev_addr, 6); | |
1008 | memset(eth->h_dest, 0xfc, 2); | |
1009 | memcpy(eth->h_dest+2, &ifa->ifa_address, 4); | |
1010 | } | |
1011 | } | |
1012 | } | |
1013 | ||
1014 | static int | |
1015 | plip_hard_header(struct sk_buff *skb, struct net_device *dev, | |
1016 | unsigned short type, void *daddr, | |
1017 | void *saddr, unsigned len) | |
1018 | { | |
1019 | struct net_local *nl = netdev_priv(dev); | |
1020 | int ret; | |
1021 | ||
1022 | if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0) | |
1023 | plip_rewrite_address (dev, (struct ethhdr *)skb->data); | |
1024 | ||
1025 | return ret; | |
1026 | } | |
1027 | ||
1028 | int plip_hard_header_cache(struct neighbour *neigh, | |
1029 | struct hh_cache *hh) | |
1030 | { | |
1031 | struct net_local *nl = neigh->dev->priv; | |
1032 | int ret; | |
6aa20a22 | 1033 | |
1da177e4 LT |
1034 | if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0) |
1035 | { | |
1036 | struct ethhdr *eth; | |
1037 | ||
1038 | eth = (struct ethhdr*)(((u8*)hh->hh_data) + | |
1039 | HH_DATA_OFF(sizeof(*eth))); | |
1040 | plip_rewrite_address (neigh->dev, eth); | |
1041 | } | |
6aa20a22 | 1042 | |
1da177e4 | 1043 | return ret; |
6aa20a22 | 1044 | } |
1da177e4 LT |
1045 | |
/* Open/initialize the board. This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ line: poll from a delayed work item instead.
		   kill_timer is the stop flag that plip_close() raises. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1109 | ||
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Poll mode: raise the stop flag and block until the
		   timer handler acknowledges via the semaphore. */
		init_MUTEX_LOCKED (&nl->killed_timer_sem);
		atomic_set (&nl->kill_timer, 1);
		down (&nl->killed_timer_sem);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any packet still held by the send/receive state machines. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1156 | ||
1157 | static int | |
1158 | plip_preempt(void *handle) | |
1159 | { | |
1160 | struct net_device *dev = (struct net_device *)handle; | |
1161 | struct net_local *nl = netdev_priv(dev); | |
1162 | ||
1163 | /* Stand our ground if a datagram is on the wire */ | |
1164 | if (nl->connection != PLIP_CN_NONE) { | |
1165 | nl->should_relinquish = 1; | |
1166 | return 1; | |
1167 | } | |
1168 | ||
1169 | nl->port_owner = 0; /* Remember that we released the bus */ | |
1170 | return 0; | |
1171 | } | |
1172 | ||
/* parport wakeup callback: the bus has become available to us again.
   Reclaim it if the interface is up; otherwise leave it free. */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* Why are we being woken up? */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Don't need the port when the interface is down */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}

	return;
}
1201 | ||
1202 | static struct net_device_stats * | |
1203 | plip_get_stats(struct net_device *dev) | |
1204 | { | |
1205 | struct net_local *nl = netdev_priv(dev); | |
1206 | struct net_device_stats *r = &nl->enet_stats; | |
1207 | ||
1208 | return r; | |
1209 | } | |
1210 | ||
1211 | static int | |
1212 | plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1213 | { | |
1214 | struct net_local *nl = netdev_priv(dev); | |
1215 | struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru; | |
1216 | ||
1217 | if (cmd != SIOCDEVPLIP) | |
1218 | return -EOPNOTSUPP; | |
1219 | ||
1220 | switch(pc->pcmd) { | |
1221 | case PLIP_GET_TIMEOUT: | |
1222 | pc->trigger = nl->trigger; | |
1223 | pc->nibble = nl->nibble; | |
1224 | break; | |
1225 | case PLIP_SET_TIMEOUT: | |
1226 | if(!capable(CAP_NET_ADMIN)) | |
1227 | return -EPERM; | |
1228 | nl->trigger = pc->trigger; | |
1229 | nl->nibble = pc->nibble; | |
1230 | break; | |
1231 | default: | |
1232 | return -EOPNOTSUPP; | |
1233 | } | |
1234 | return 0; | |
1235 | } | |
77933d72 | 1236 | |
1da177e4 LT |
1237 | static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 }; |
1238 | static int timid; | |
1239 | ||
1240 | module_param_array(parport, int, NULL, 0); | |
1241 | module_param(timid, int, 0); | |
1242 | MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip"); | |
1243 | ||
1244 | static struct net_device *dev_plip[PLIP_MAX] = { NULL, }; | |
1245 | ||
1246 | static inline int | |
1247 | plip_searchfor(int list[], int a) | |
1248 | { | |
1249 | int i; | |
1250 | for (i = 0; i < PLIP_MAX && list[i] != -1; i++) { | |
1251 | if (list[i] == a) return 1; | |
1252 | } | |
1253 | return 0; | |
1254 | } | |
1255 | ||
1256 | /* plip_attach() is called (by the parport code) when a port is | |
1257 | * available to use. */ | |
1258 | static void plip_attach (struct parport *port) | |
1259 | { | |
1260 | static int unit; | |
1261 | struct net_device *dev; | |
1262 | struct net_local *nl; | |
1263 | char name[IFNAMSIZ]; | |
1264 | ||
6aa20a22 | 1265 | if ((parport[0] == -1 && (!timid || !port->devices)) || |
1da177e4 LT |
1266 | plip_searchfor(parport, port->number)) { |
1267 | if (unit == PLIP_MAX) { | |
1268 | printk(KERN_ERR "plip: too many devices\n"); | |
1269 | return; | |
1270 | } | |
1271 | ||
1272 | sprintf(name, "plip%d", unit); | |
1273 | dev = alloc_etherdev(sizeof(struct net_local)); | |
1274 | if (!dev) { | |
1275 | printk(KERN_ERR "plip: memory squeeze\n"); | |
1276 | return; | |
1277 | } | |
6aa20a22 | 1278 | |
1da177e4 LT |
1279 | strcpy(dev->name, name); |
1280 | ||
1281 | SET_MODULE_OWNER(dev); | |
1282 | dev->irq = port->irq; | |
1283 | dev->base_addr = port->base; | |
1284 | if (port->irq == -1) { | |
1285 | printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode," | |
1286 | "which is fairly inefficient!\n", port->name); | |
1287 | } | |
1288 | ||
1289 | nl = netdev_priv(dev); | |
c4028958 | 1290 | nl->dev = dev; |
1da177e4 | 1291 | nl->pardev = parport_register_device(port, name, plip_preempt, |
6aa20a22 | 1292 | plip_wakeup, plip_interrupt, |
1da177e4 LT |
1293 | 0, dev); |
1294 | ||
1295 | if (!nl->pardev) { | |
1296 | printk(KERN_ERR "%s: parport_register failed\n", name); | |
1297 | goto err_free_dev; | |
1298 | return; | |
1299 | } | |
1300 | ||
1301 | plip_init_netdev(dev); | |
1302 | ||
1303 | if (register_netdev(dev)) { | |
1304 | printk(KERN_ERR "%s: network register failed\n", name); | |
1305 | goto err_parport_unregister; | |
1306 | } | |
1307 | ||
1308 | printk(KERN_INFO "%s", version); | |
1309 | if (dev->irq != -1) | |
1310 | printk(KERN_INFO "%s: Parallel port at %#3lx, " | |
1311 | "using IRQ %d.\n", | |
1312 | dev->name, dev->base_addr, dev->irq); | |
1313 | else | |
1314 | printk(KERN_INFO "%s: Parallel port at %#3lx, " | |
1315 | "not using IRQ.\n", | |
1316 | dev->name, dev->base_addr); | |
1317 | dev_plip[unit++] = dev; | |
1318 | } | |
1319 | return; | |
1320 | ||
1321 | err_parport_unregister: | |
1322 | parport_unregister_device(nl->pardev); | |
1323 | err_free_dev: | |
1324 | free_netdev(dev); | |
1325 | return; | |
1326 | } | |
1327 | ||
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: devices are torn down in plip_cleanup_module(). */
}
1334 | ||
/* Hooks handed to the parport layer: attach runs for each port present
   at registration time and for any hot-added ports afterwards. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
1340 | ||
/* Module unload: unhook from parport, then unregister and free every
   device created by plip_attach(). */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	parport_unregister_driver (&plip_driver);

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			/* Give the bus back before dropping our handle. */
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}
}
1360 | ||
#ifndef MODULE

/* Next free slot in parport[] for "plip=parportN" boot arguments. */
static int parport_ptr;

/* Parse the "plip=" kernel command line option.
   Accepted forms: "plip=parportN" (bind that port), "plip=timid",
   and "plip=" / "plip=0" (disable the driver). */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */
1396 | ||
1397 | static int __init plip_init (void) | |
1398 | { | |
1399 | if (parport[0] == -2) | |
1400 | return 0; | |
1401 | ||
1402 | if (parport[0] != -1 && timid) { | |
1403 | printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n"); | |
1404 | timid = 0; | |
1405 | } | |
1406 | ||
1407 | if (parport_register_driver (&plip_driver)) { | |
1408 | printk (KERN_WARNING "plip: couldn't register driver\n"); | |
1409 | return 1; | |
1410 | } | |
1411 | ||
1412 | return 0; | |
1413 | } | |
1414 | ||
/* Standard module plumbing: init/exit hooks and license tag. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");

/*
 * Local variables:
 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
 * End:
 */