/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif

#define MAX_SPEED 0xffff

#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
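
/*
 * Illustrative note (annotation, not from the original source): ZONE()
 * rounds a sector down to the start of its packet-aligned zone.  Assuming
 * a packet size of 32 sectors (16kB) and offset 0, sectors 0-31 map to
 * zone 0, sectors 32-63 to zone 32, and so on; a non-zero pd->offset
 * shifts the grid so that zones line up with the track start rather than
 * with absolute sector 0.
 */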
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
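
/*
 * Worked example (annotation, not from the original source): writers are
 * throttled once bio_queue_size reaches the "on" mark and resumed when it
 * falls back to the "off" mark.  For hi = 10000 the clamps above leave hi
 * unchanged (500 <= 10000 <= 1000000) and force lo <= 9900, so the
 * hysteresis window can never invert.
 */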
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
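
/*
 * Usage sketch (annotation, not from the original source): the attributes
 * handled above are driven from userspace, e.g.
 *
 *   echo 1     > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *   echo 10000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 *   echo 9000  > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_off
 *
 * The device directory name follows pd->name; "pktcdvd0" is an example.
 */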
static const struct sysfs_ops kobj_pkt_ops = {
	.show  = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
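
/*
 * Usage sketch (annotation, not from the original source): a writer is
 * mapped and unmapped by writing "major:minor" of the CD/DVD device to
 * the class control files, e.g. for /dev/sr0 at 11:0:
 *
 *   echo "11:0" > /sys/class/pktcdvd/add
 *   cat /sys/class/pktcdvd/device_map
 *   echo "11:0" > /sys/class/pktcdvd/remove
 */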
static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add, 0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		printk(DRIVER_NAME": failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}
/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}

static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK(DRIVER_NAME": queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
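
/*
 * Example (annotation, not from the original source): if the tree holds
 * bios starting at sectors 16, 48 and 80, pkt_rbtree_find(pd, 32) returns
 * the node for sector 48 (the first node with bi_sector >= 32), while
 * pkt_rbtree_find(pd, 96) returns NULL.
 */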
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
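
/*
 * Hedged example (not part of the original driver): a minimal caller of
 * pkt_generic_packet(), sketching how the helpers below build a command.
 * GPCMD_TEST_UNIT_READY is assumed to come from <linux/cdrom.h>; the
 * init_cdrom_command()/packet_command API is the one used throughout this
 * file.
 */
#if 0
static int pkt_example_test_unit_ready(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	/* no data phase: buffer == NULL, buflen == 0 */
	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
	cgc.quiet = 1;	/* don't log expected "not ready" states */
	return pkt_generic_packet(pd, &cgc);
}
#endif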
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	static char *info[9] = { "No sense", "Recovered error", "Not ready",
				 "Medium error", "Hardware error", "Illegal request",
				 "Unit attention", "Data protect", "Blank check" };
	int i;
	struct request_sense *sense = cgc->sense;

	printk(DRIVER_NAME":");
	for (i = 0; i < CDROM_PACKET_SIZE; i++)
		printk(" %02x", cgc->cmd[i]);

	if (sense == NULL) {
		printk("no sense\n");
		return;
	}

	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

	if (sense->sense_key > 8) {
		printk(" (INVALID)\n");
		return;
	}

	printk(" (%s)\n", info[sense->sense_key]);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom);

		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
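
/*
 * Layout note (annotation, not from the original source): pkt->pages[]
 * packs FRAMES_PER_PAGE frames per page; with 4kB pages and 2kB frames
 * that is two frames per page, so frame f lives in page f/2 at offset
 * (f & 1) * CD_FRAMESIZE, matching the p/offs bookkeeping above.
 */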
static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
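
/*
 * Example (annotation, not from the original source): if the queued
 * orig_bios cover only frames 0-3 of an 8-frame packet, written[] marks
 * those four frames and read bios are issued for frames 4-7; when
 * cache_valid is set, the pages still hold the zone's previous contents
 * and no reads are needed at all.
 */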
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
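
/*
 * State flow (annotation, summarizing pkt_run_state_machine() below):
 * WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED -> IDLE is the normal
 * path. A failed read or write diverts to RECOVERY, which either
 * re-issues the write (back to WRITE_WAIT) or, since file system
 * assisted recovery is not implemented, finishes the packet with an
 * error.
 */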
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio *bio;
	int frames_write = 0;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
	}

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				bvec[f].bv_page = src_bvl->bv_page;
				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
			} else {
				pkt_copy_bio_data(bio, segment, src_offs,
						  bvec[f].bv_page, bvec[f].bv_offset);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	for (f = 0; f < pkt->frames; f++)
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another packet.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
	printk("%u blocks, ", pd->settings.size >> 2);
	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		printk(DRIVER_NAME": Unknown disc. No track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		printk(DRIVER_NAME": Disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk("failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk(DRIVER_NAME": failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		printk(DRIVER_NAME": can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk(DRIVER_NAME": detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		printk(DRIVER_NAME": packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			printk(DRIVER_NAME": unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk(DRIVER_NAME": write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
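
/*
 * Example lookup (annotation, not from the original source): a high speed
 * CD-RW (sub-type 1) reporting ATIP A1 speed code 4 yields
 * hs_clv_to_speed[4] == 10, i.e. a 10x medium; a code whose table entry
 * is 0 is treated as an unknown speed by pkt_media_speed() below.
 */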
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		printk(DRIVER_NAME": Disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			printk(DRIVER_NAME": Unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed) {
		printk(DRIVER_NAME": Max. media speed: %d\n", *speed);
		return 0;
	} else {
		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	VPRINTK(DRIVER_NAME": Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
}
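
/*
 * Unit note (annotation, not from the original source): the speeds above
 * are in kB/s as passed to GPCMD_SET_SPEED; one CD "x" is roughly
 * 176.4 kB/s at the raw frame rate, which is why the code multiplies the
 * media speed factor by 177 and divides by 176 for display.
 */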
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		printk(DRIVER_NAME": pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			printk(DRIVER_NAME": not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK(DRIVER_NAME": entering open\n");

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static int pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;
	int ret = 0;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		pkt_queue_bio(pd, cloned_bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
			pd->name, (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		printk(DRIVER_NAME": wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, first_sectors);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return;
		}
	}

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return;
end_io:
	bio_io_error(bio);
}
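
/*
 * Worked example for the zone handling in pkt_make_request() (illustrative,
 * assuming a fixed packet size of 32 frames = 64 sectors and offset 0):
 *
 *	ZONE(60, pd) = 60 & ~63 = 0
 *	ZONE(75, pd) = 75 & ~63 = 64
 *
 * so a 16-sector write starting at sector 60 covers sectors 60..75: its first
 * sector lies in zone 0 and its last in zone 64.  bio_split() is then called
 * with first_sectors = 64 - 60 = 4 and each half is resubmitted, so every bio
 * that reaches the work queue stays within a single packet.
 */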
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = ZONE(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
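
/*
 * Example for pkt_merge_bvec() (illustrative, again assuming a 64-sector
 * packet size and 4kB pages): for a bio that currently starts 60 sectors into
 * its zone and holds 1kB,
 *
 *	used       = (60 << 9) + 1024 = 31744
 *	remaining  = (64 << 9) - 31744 = 1024
 *	remaining2 = 4096 - 1024 = 3072
 *
 * and max() returns 3072: the zone itself only has 1kB of room left, but a
 * bio up to one page is always admitted and pkt_make_request() splits it if
 * it ends up crossing the packet boundary.
 */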
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE(inode)->data);
}

static const struct file_operations pkt_proc_fops = {
	.open		= pkt_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release
};
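
/*
 * pkt_seq_show() backs /proc/driver/pktcdvd/<name>.  An illustrative excerpt
 * (values made up, layout following the seq_printf() calls above):
 *
 *	Writer pktcdvd0 mapped to sr0:
 *
 *	Settings:
 *		packet size:		32kB
 *		...
 *	Queue state:
 *		bios queued:		0
 */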
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		printk(DRIVER_NAME": Recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		printk(DRIVER_NAME": can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.check_events	= pkt_check_events,
};
static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	printk(DRIVER_NAME": setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		DPRINTK(DRIVER_NAME": dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
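
/*
 * Userspace reaches pkt_ctl_ioctl() through the misc node defined above,
 * typically visible as /dev/pktcdvd/control (the pktsetup utility is the
 * usual client).  A rough sketch of a setup request, with the device path
 * purely illustrative:
 *
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *
 *	c.dev = ...;	// dev_t of the CD device, in new_encode_dev() form
 *	fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success c.pkt_dev holds the dev_t of the new pktcdvd device
 */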
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					       sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		printk(DRIVER_NAME": Unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		printk(DRIVER_NAME": Unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);