Commit | Line | Data |
---|---|---|
0b52b749 BP |
1 | /* |
2 | * | |
3 | * Copyright 1999 Digi International (www.digi.com) | |
4 | * James Puzzo <jamesp at digi dot com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2, or (at your option) | |
9 | * any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the | |
13 | * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR | |
14 | * PURPOSE. See the GNU General Public License for more details. | |
15 | * | |
16 | */ | |
17 | ||
18 | /* | |
19 | * | |
20 | * Filename: | |
21 | * | |
22 | * dgrp_dpa_ops.c | |
23 | * | |
24 | * Description: | |
25 | * | |
26 | * Handle the file operations required for the "dpa" devices. | |
27 | * Includes those functions required to register the "dpa" devices | |
28 | * in "/proc". | |
29 | * | |
30 | * Author: | |
31 | * | |
32 | * James A. Puzzo | |
33 | * | |
34 | */ | |
35 | ||
36 | #include <linux/module.h> | |
37 | #include <linux/proc_fs.h> | |
38 | #include <linux/tty.h> | |
39 | #include <linux/poll.h> | |
40 | #include <linux/cred.h> | |
41 | #include <linux/sched.h> | |
42 | #include <linux/ratelimit.h> | |
43 | #include <asm/unaligned.h> | |
44 | ||
45 | #include "dgrp_common.h" | |
46 | ||
/*
 * File operation declarations -- prototypes for the handlers installed
 * in dpa_ops below.  "select" is a historical name; the function is
 * wired up as the .poll handler.
 */
static int dgrp_dpa_open(struct inode *, struct file *);
static int dgrp_dpa_release(struct inode *, struct file *);
static ssize_t dgrp_dpa_read(struct file *, char __user *, size_t, loff_t *);
static long dgrp_dpa_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg);
static unsigned int dgrp_dpa_select(struct file *, struct poll_table_struct *);
54 | ||
/* File operations for the "dpa" /proc entries (see dgrp_register_dpa_hook). */
static const struct file_operations dpa_ops = {
	.owner   =   THIS_MODULE,
	.read    =   dgrp_dpa_read,
	.poll    =   dgrp_dpa_select,
	.unlocked_ioctl = dgrp_dpa_ioctl,
	.open    =   dgrp_dpa_open,
	.release =   dgrp_dpa_release,
};
63 | ||
64 | static struct inode_operations dpa_inode_ops = { | |
65 | .permission = dgrp_inode_permission | |
66 | }; | |
67 | ||
68 | ||
69 | ||
/*
 * Board-level status returned by the DIGI_GETNODE ioctl.  This layout
 * is a userspace ABI -- do not reorder or resize the fields.
 */
struct digi_node {
	uint	nd_state;		/* Node state: 1 = up, 0 = down. */
	uint	nd_chan_count;		/* Number of channels found */
	uint	nd_tx_byte;		/* Tx data count */
	uint	nd_rx_byte;		/* RX data count */
	u8	nd_ps_desc[MAX_DESC_LEN];	/* Description from PS */
};

#define DIGI_GETNODE      (('d'<<8) | 249)	/* get board info */
79 | ||
80 | ||
/*
 * Per-channel status exchanged with the DIGI_GETCHAN ioctl.  ch_port is
 * filled in by the caller; all other fields are filled in by the driver.
 * This layout is a userspace ABI -- do not reorder or resize the fields.
 */
struct digi_chan {
	uint	ch_port;	/* Port number to get info on */
	uint	ch_open;	/* 1 if open, 0 if not */
	uint	ch_txcount;	/* TX data count  */
	uint	ch_rxcount;	/* RX data count  */
	uint	ch_s_brate;	/* Realport BRATE */
	uint	ch_s_estat;	/* Realport ELAST */
	uint	ch_s_cflag;	/* Realport CFLAG */
	uint	ch_s_iflag;	/* Realport IFLAG */
	uint	ch_s_oflag;	/* Realport OFLAG */
	uint	ch_s_xflag;	/* Realport XFLAG */
	uint	ch_s_mstat;	/* Realport MLAST */
};

#define DIGI_GETCHAN      (('d'<<8) | 248)	/* get channel info */
96 | ||
97 | ||
/*
 * Vital product data returned by the DIGI_GETVPD ioctl.  vpd_len is the
 * number of valid bytes in vpd_data (0 when no VPD is available).
 * Userspace ABI -- do not reorder or resize the fields.
 */
struct digi_vpd {
	int vpd_len;
	char vpd_data[VPDSIZE];
};

#define DIGI_GETVPD       (('d'<<8) | 246)	/* get VPD info */
104 | ||
105 | ||
/*
 * Debug settings accepted by the DIGI_SETDEBUG ioctl: enable/disable
 * flag and the port to monitor.  Userspace ABI -- do not change layout.
 */
struct digi_debug {
	int onoff;
	int port;
};

#define DIGI_SETDEBUG     (('d'<<8) | 247)	/* set debug info */
112 | ||
113 | ||
114 | void dgrp_register_dpa_hook(struct proc_dir_entry *de) | |
115 | { | |
116 | struct nd_struct *node = de->data; | |
117 | ||
118 | de->proc_iops = &dpa_inode_ops; | |
119 | de->proc_fops = &dpa_ops; | |
120 | ||
121 | node->nd_dpa_de = de; | |
122 | spin_lock_init(&node->nd_dpa_lock); | |
123 | } | |
124 | ||
/*
 * dgrp_dpa_open -- open the DPA device for a particular PortServer
 *
 * Takes a module reference for the lifetime of the open file, requires
 * CAP_SYS_ADMIN, resolves the node from the /proc entry, and allocates
 * the single DPA monitoring buffer.  Only one opener is allowed at a
 * time: a second open fails with -EBUSY while the buffer exists.  On
 * any failure the module reference is dropped before returning.
 */
static int dgrp_dpa_open(struct inode *inode, struct file *file)
{
	struct nd_struct *nd;
	int rtn = 0;

	struct proc_dir_entry *de;

	/* Pin the module while this file is open. */
	rtn = try_module_get(THIS_MODULE);
	if (!rtn)
		return -ENXIO;

	rtn = 0;

	/* Monitoring exposes raw port traffic; restrict to admin. */
	if (!capable(CAP_SYS_ADMIN)) {
		rtn = -EPERM;
		goto done;
	}

	/*
	 * Make sure that the "private_data" field hasn't already been used.
	 */
	if (file->private_data) {
		rtn = -EINVAL;
		goto done;
	}

	/*
	 * Get the node pointer, and fail if it doesn't exist.
	 */
	de = PDE(inode);
	if (!de) {
		rtn = -ENXIO;
		goto done;
	}
	nd = (struct nd_struct *)de->data;
	if (!nd) {
		rtn = -ENXIO;
		goto done;
	}

	file->private_data = (void *) nd;

	/*
	 * Allocate the DPA buffer.  A non-NULL nd_dpa_buf means another
	 * opener already holds the device.
	 */

	if (nd->nd_dpa_buf) {
		rtn = -EBUSY;
	} else {
		nd->nd_dpa_buf = kmalloc(DPA_MAX, GFP_KERNEL);

		if (!nd->nd_dpa_buf) {
			rtn = -ENOMEM;
		} else {
			/* Empty ring: in == out. */
			nd->nd_dpa_out = 0;
			nd->nd_dpa_in = 0;
			nd->nd_dpa_lbolt = jiffies;
		}
	}

done:

	/* Balance try_module_get() on every failure path. */
	if (rtn)
		module_put(THIS_MODULE);
	return rtn;
}
194 | ||
/*
 * dgrp_dpa_release -- close the DPA device for a particular PortServer
 *
 * Detaches and frees the monitoring buffer under the DPA lock, wakes
 * any writer blocked waiting for buffer space (dgrp_dpa() checks
 * nd_dpa_buf and stops producing once it is NULL), drops the module
 * reference, and clears private_data.  Always returns 0.
 */
static int dgrp_dpa_release(struct inode *inode, struct file *file)
{
	struct nd_struct *nd;
	u8 *buf;
	unsigned long lock_flags;

	/*
	 * Get the node pointer, and quit if it doesn't exist.
	 */
	nd = (struct nd_struct *)(file->private_data);
	if (!nd)
		goto done;

	/*
	 * Free the dpa buffer.  The pointer is detached under the lock;
	 * kfree() happens after the lock is dropped.
	 */

	spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

	buf = nd->nd_dpa_buf;

	nd->nd_dpa_buf = NULL;
	/* Mark the ring empty so producers see no pending space/data. */
	nd->nd_dpa_out = nd->nd_dpa_in;

	/*
	 * Wakeup any thread waiting for buffer space.
	 */

	if (nd->nd_dpa_flag & DPA_WAIT_SPACE) {
		nd->nd_dpa_flag &= ~DPA_WAIT_SPACE;
		wake_up_interruptible(&nd->nd_dpa_wqueue);
	}

	spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);

	kfree(buf);

done:
	/* Balances try_module_get() in dgrp_dpa_open(). */
	module_put(THIS_MODULE);
	file->private_data = NULL;
	return 0;
}
240 | ||
/*
 * dgrp_dpa_read
 *
 * Copy data from the monitoring buffer to the user, freeing space
 * in the monitoring buffer for more messages.
 *
 * Blocks (interruptibly) until at least one byte is available, then
 * copies up to "count" bytes out of the circular buffer -- in two
 * chunks when the data wraps past the end of the buffer -- advances
 * the out index, and wakes a producer waiting on DPA_WAIT_SPACE once
 * enough room has been freed.
 *
 * NOTE(review): the spinlock is dropped around each copy_to_user()
 * (copying to userspace may fault/sleep), and nd_dpa_out/nd_dpa_buf
 * are re-read afterwards.  A concurrent release() freeing the buffer
 * in that window looks unprotected -- verify against how open/release
 * and read can interleave for this device.
 */
static ssize_t dgrp_dpa_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct nd_struct *nd;
	int n;		/* bytes to copy this pass */
	int r;		/* bytes from nd_dpa_out to the end of the ring */
	int offset = 0;	/* user-buffer offset for the second chunk */
	int res = 0;	/* total bytes delivered */
	ssize_t rtn;
	unsigned long lock_flags;

	/*
	 * Get the node pointer, and quit if it doesn't exist.
	 */
	nd = (struct nd_struct *)(file->private_data);
	if (!nd)
		return -ENXIO;

	/*
	 * Wait for some data to appear in the buffer.
	 */

	spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

	for (;;) {
		/* Bytes pending in the circular buffer. */
		n = (nd->nd_dpa_in - nd->nd_dpa_out) & DPA_MASK;

		if (n != 0)
			break;

		/* Ask the producer to wake us when data arrives. */
		nd->nd_dpa_flag |= DPA_WAIT_DATA;

		spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);

		/*
		 * Go to sleep waiting until the condition becomes true.
		 * (The producer clears DPA_WAIT_DATA before waking us.)
		 */
		rtn = wait_event_interruptible(nd->nd_dpa_wqueue,
			((nd->nd_dpa_flag & DPA_WAIT_DATA) == 0));

		if (rtn)
			return rtn;

		spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);
	}

	/*
	 * Read whatever is there.
	 */

	if (n > count)
		n = count;

	res = n;

	/* Contiguous bytes before the ring wraps. */
	r = DPA_MAX - nd->nd_dpa_out;

	if (r <= n) {

		/* First chunk: from nd_dpa_out to the end of the ring. */
		spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
		rtn = copy_to_user((void __user *)buf,
				   nd->nd_dpa_buf + nd->nd_dpa_out, r);
		spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

		if (rtn) {
			rtn = -EFAULT;
			goto done;
		}

		nd->nd_dpa_out = 0;
		n -= r;
		offset = r;
	}

	/* Second (or only) chunk: the remaining n bytes. */
	spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
	rtn = copy_to_user((void __user *)buf + offset,
			   nd->nd_dpa_buf + nd->nd_dpa_out, n);
	spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

	if (rtn) {
		rtn = -EFAULT;
		goto done;
	}

	nd->nd_dpa_out += n;

	*ppos += res;

	rtn = res;

	/*
	 * Wakeup any thread waiting for buffer space.
	 */

	n = (nd->nd_dpa_in - nd->nd_dpa_out) & DPA_MASK;

	if (nd->nd_dpa_flag & DPA_WAIT_SPACE &&
	    (DPA_MAX - n) > DPA_HIGH_WATER) {
		nd->nd_dpa_flag &= ~DPA_WAIT_SPACE;
		wake_up_interruptible(&nd->nd_dpa_wqueue);
	}

done:
	spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
	return rtn;
}
353 | ||
354 | static unsigned int dgrp_dpa_select(struct file *file, | |
355 | struct poll_table_struct *table) | |
356 | { | |
357 | unsigned int retval = 0; | |
358 | struct nd_struct *nd = file->private_data; | |
359 | ||
360 | if (nd->nd_dpa_out != nd->nd_dpa_in) | |
361 | retval |= POLLIN | POLLRDNORM; /* Conditionally readable */ | |
362 | ||
363 | retval |= POLLOUT | POLLWRNORM; /* Always writeable */ | |
364 | ||
365 | return retval; | |
366 | } | |
367 | ||
368 | static long dgrp_dpa_ioctl(struct file *file, unsigned int cmd, | |
369 | unsigned long arg) | |
370 | { | |
371 | ||
372 | struct nd_struct *nd; | |
373 | struct digi_chan getchan; | |
374 | struct digi_node getnode; | |
375 | struct ch_struct *ch; | |
376 | struct digi_debug setdebug; | |
377 | struct digi_vpd vpd; | |
378 | unsigned int port; | |
379 | void __user *uarg = (void __user *) arg; | |
380 | ||
381 | nd = file->private_data; | |
382 | ||
383 | switch (cmd) { | |
384 | case DIGI_GETCHAN: | |
385 | if (copy_from_user(&getchan, uarg, sizeof(struct digi_chan))) | |
386 | return -EFAULT; | |
387 | ||
388 | port = getchan.ch_port; | |
389 | ||
720a9bec | 390 | if (port > nd->nd_chan_count) |
0b52b749 BP |
391 | return -EINVAL; |
392 | ||
393 | ch = nd->nd_chan + port; | |
394 | ||
395 | getchan.ch_open = (ch->ch_open_count > 0) ? 1 : 0; | |
396 | getchan.ch_txcount = ch->ch_txcount; | |
397 | getchan.ch_rxcount = ch->ch_rxcount; | |
398 | getchan.ch_s_brate = ch->ch_s_brate; | |
399 | getchan.ch_s_estat = ch->ch_s_elast; | |
400 | getchan.ch_s_cflag = ch->ch_s_cflag; | |
401 | getchan.ch_s_iflag = ch->ch_s_iflag; | |
402 | getchan.ch_s_oflag = ch->ch_s_oflag; | |
403 | getchan.ch_s_xflag = ch->ch_s_xflag; | |
404 | getchan.ch_s_mstat = ch->ch_s_mlast; | |
405 | ||
406 | if (copy_to_user(uarg, &getchan, sizeof(struct digi_chan))) | |
407 | return -EFAULT; | |
408 | break; | |
409 | ||
410 | ||
411 | case DIGI_GETNODE: | |
412 | getnode.nd_state = (nd->nd_state & NS_READY) ? 1 : 0; | |
413 | getnode.nd_chan_count = nd->nd_chan_count; | |
414 | getnode.nd_tx_byte = nd->nd_tx_byte; | |
415 | getnode.nd_rx_byte = nd->nd_rx_byte; | |
416 | ||
417 | memset(&getnode.nd_ps_desc, 0, MAX_DESC_LEN); | |
418 | strncpy(getnode.nd_ps_desc, nd->nd_ps_desc, MAX_DESC_LEN); | |
419 | ||
420 | if (copy_to_user(uarg, &getnode, sizeof(struct digi_node))) | |
421 | return -EFAULT; | |
422 | break; | |
423 | ||
424 | ||
425 | case DIGI_SETDEBUG: | |
426 | if (copy_from_user(&setdebug, uarg, sizeof(struct digi_debug))) | |
427 | return -EFAULT; | |
428 | ||
429 | nd->nd_dpa_debug = setdebug.onoff; | |
430 | nd->nd_dpa_port = setdebug.port; | |
431 | break; | |
432 | ||
433 | ||
434 | case DIGI_GETVPD: | |
435 | if (nd->nd_vpd_len > 0) { | |
436 | vpd.vpd_len = nd->nd_vpd_len; | |
437 | memcpy(&vpd.vpd_data, &nd->nd_vpd, nd->nd_vpd_len); | |
438 | } else { | |
439 | vpd.vpd_len = 0; | |
440 | } | |
441 | ||
442 | if (copy_to_user(uarg, &vpd, sizeof(struct digi_vpd))) | |
443 | return -EFAULT; | |
444 | break; | |
445 | } | |
446 | ||
447 | return 0; | |
448 | } | |
449 | ||
/**
 * dgrp_dpa() -- send data to the device monitor queue
 * @nd: pointer to a node structure
 * @buf: buffer of data to copy to the monitoring buffer
 * @nbuf: number of bytes to transfer to the buffer
 *
 * Called by the net device routines to send data to the device
 * monitor queue.  Copies into the circular DPA buffer under the DPA
 * spinlock, splitting the copy in two when it wraps past the end of
 * the ring.  When free space drops below the high-water mark it sets
 * DPA_WAIT_SPACE (the reader clears it once room is available); if
 * the ring is completely full the remaining data is dropped.  Wakes
 * any reader blocked on DPA_WAIT_DATA after each chunk.
 */
static void dgrp_dpa(struct nd_struct *nd, u8 *buf, int nbuf)
{
	int n;	/* free bytes available / bytes to copy this pass */
	int r;	/* contiguous bytes before the ring wraps */
	unsigned long lock_flags;

	/*
	 * Grab DPA lock.
	 */
	spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

	/*
	 * Loop while data remains.  A NULL nd_dpa_buf means release()
	 * has torn the device down; stop producing.
	 */
	while (nbuf > 0 && nd->nd_dpa_buf != NULL) {

		/* Free space, leaving one slot so in == out means empty. */
		n = (nd->nd_dpa_out - nd->nd_dpa_in - 1) & DPA_MASK;

		/*
		 * Enforce flow control on the DPA device.
		 */
		if (n < (DPA_MAX - DPA_HIGH_WATER))
			nd->nd_dpa_flag |= DPA_WAIT_SPACE;

		/*
		 * This should never happen, as the flow control above
		 * should have stopped things before they got to this point.
		 */
		if (n == 0) {
			spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
			return;
		}

		/*
		 * Copy as much data as will fit.
		 */

		if (n > nbuf)
			n = nbuf;

		r = DPA_MAX - nd->nd_dpa_in;

		if (r <= n) {
			/* First chunk: fill up to the end of the ring. */
			memcpy(nd->nd_dpa_buf + nd->nd_dpa_in, buf, r);

			n -= r;

			nd->nd_dpa_in = 0;

			buf += r;
			nbuf -= r;
		}

		/* Second (or only) chunk from the ring start. */
		memcpy(nd->nd_dpa_buf + nd->nd_dpa_in, buf, n);

		nd->nd_dpa_in += n;

		buf += n;
		nbuf -= n;

		/* Sanity check: the in index must stay inside the ring. */
		if (nd->nd_dpa_in >= DPA_MAX)
			pr_info_ratelimited("%s - nd->nd_dpa_in (%i) >= DPA_MAX\n",
					    __func__, nd->nd_dpa_in);

		/*
		 * Wakeup any thread waiting for data
		 */
		if (nd->nd_dpa_flag & DPA_WAIT_DATA) {
			nd->nd_dpa_flag &= ~DPA_WAIT_DATA;
			wake_up_interruptible(&nd->nd_dpa_wqueue);
		}
	}

	/*
	 * Release the DPA lock.
	 */
	spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
}
538 | ||
539 | /** | |
540 | * dgrp_monitor_data() -- builds a DPA data packet | |
541 | * @nd: pointer to a node structure | |
542 | * @type: type of message to be logged in the DPA buffer | |
543 | * @buf: buffer of data to be logged in the DPA buffer | |
544 | * @size -- number of bytes in the "buf" buffer | |
545 | */ | |
546 | void dgrp_dpa_data(struct nd_struct *nd, int type, u8 *buf, int size) | |
547 | { | |
548 | u8 header[5]; | |
549 | ||
550 | header[0] = type; | |
551 | ||
552 | put_unaligned_be32(size, header + 1); | |
553 | ||
554 | dgrp_dpa(nd, header, sizeof(header)); | |
555 | dgrp_dpa(nd, buf, size); | |
556 | } |