1 /*
2 * IDE ATAPI streaming tape driver.
3 *
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
6 *
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering in the Technion - Israel's
9 * Institute Of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
10 *
11 * It is hereby placed under the terms of the GNU general public license.
12 * (See linux/COPYING).
13 *
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
16 */
17
18 #define IDETAPE_VERSION "1.20"
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
40
41 #include <asm/byteorder.h>
42 #include <linux/irq.h>
43 #include <linux/uaccess.h>
44 #include <linux/io.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
47
48 enum {
49 /* output errors only */
50 DBG_ERR = (1 << 0),
51 /* output all sense key/asc */
52 DBG_SENSE = (1 << 1),
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
56 DBG_PROCS = (1 << 3),
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
59 };
60
61 /* define to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
63
64 #if IDETAPE_DEBUG_LOG
65 #define debug_log(lvl, fmt, args...) \
66 { \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
69 }
70 #else
71 #define debug_log(lvl, fmt, args...) do {} while (0)
72 #endif
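/*
 * Typical usage, as found throughout this driver: the first argument is
 * one of the DBG_* classes above, the rest are printk()-style, e.g.
 *
 *	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 */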
73
74 /**************************** Tunable parameters *****************************/
75
76
77 /*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 *
88 * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
89 * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
90 */
91 #define IDETAPE_MIN_PIPELINE_STAGES 1
92 #define IDETAPE_MAX_PIPELINE_STAGES 400
93 #define IDETAPE_INCREASE_STAGES_RATE 20
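/*
 * The increment itself is applied in idetape_end_request() below: when a
 * pipelined transfer completes without error and the pipeline has drained,
 * max_stages grows by (max_pipeline - min_pipeline) / 10 (at least 1) and
 * is clamped to the [min_pipeline, max_pipeline] range.
 */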
94
95 /*
96 * After each failed packet command we issue a request sense command and retry
97 * the packet command IDETAPE_MAX_PC_RETRIES times.
98 *
99 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
100 */
101 #define IDETAPE_MAX_PC_RETRIES 3
102
103 /*
104 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
105 * bytes. This is used for several packet commands (Not for READ/WRITE commands)
106 */
107 #define IDETAPE_PC_BUFFER_SIZE 256
108
109 /*
110 * In various places in the driver, we need to allocate storage
111 * for packet commands and requests, which will remain valid while
112 * we leave the driver to wait for an interrupt or a timeout event.
113 */
114 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
115
116 /*
117 * Some drives (for example, Seagate STT3401A Travan) require a very long
118 * timeout, because they don't return an interrupt or clear their busy bit
119 * until after the command completes (even retension commands).
120 */
121 #define IDETAPE_WAIT_CMD (900*HZ)
122
123 /*
124 * The following parameter is used to select the point in the internal tape fifo
125 * in which we will start to refill the buffer. Decreasing the following
126 * parameter will improve the system's latency and interactive response, while
127 * using a high value might improve system throughput.
128 */
129 #define IDETAPE_FIFO_THRESHOLD 2
130
131 /*
132 * DSC polling parameters.
133 *
134 * Polling for DSC (a single bit in the status register) is a very important
135 * function in ide-tape. There are two cases in which we poll for DSC:
136 *
137 * 1. Before a read/write packet command, to ensure that we can transfer data
138 * from/to the tape's data buffers, without causing an actual media access.
139 * In case the tape is not ready yet, we take out our request from the device
140 * request queue, so that ide.c could service requests from the other device
141 * on the same interface in the meantime.
142 *
143 * 2. After the successful initialization of a "media access packet command",
144 * which is a command that can take a long time to complete (the interval can
145 * range from several seconds to even an hour). Again, we postpone our request
146 * in the middle to free the bus for the other device. The polling frequency
147 * here should be lower than the read/write frequency since those media access
148 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
149 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
150 * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
151 *
152 * We also set a timeout for the timer, in case something goes wrong. The
153 * timeout should be longer than the maximum execution time of a tape operation.
154 */
155
156 /* DSC timings. */
157 #define IDETAPE_DSC_RW_MIN (5 * HZ / 100) /* 50 msec */
158 #define IDETAPE_DSC_RW_MAX (40 * HZ / 100) /* 400 msec */
159 #define IDETAPE_DSC_RW_TIMEOUT (2 * 60 * HZ) /* 2 minutes */
160 #define IDETAPE_DSC_MA_FAST (2 * HZ) /* 2 seconds */
161 #define IDETAPE_DSC_MA_THRESHOLD (5 * 60 * HZ) /* 5 minutes */
162 #define IDETAPE_DSC_MA_SLOW (30 * HZ) /* 30 seconds */
163 #define IDETAPE_DSC_MA_TIMEOUT (2 * 60 * 60 * HZ) /* 2 hours */
164
165 /*************************** End of tunable parameters ***********************/
166
167 /* Read/Write error simulation */
168 #define SIMULATE_ERRORS 0
169
170 /* tape directions */
171 enum {
172 IDETAPE_DIR_NONE = (1 << 0),
173 IDETAPE_DIR_READ = (1 << 1),
174 IDETAPE_DIR_WRITE = (1 << 2),
175 };
176
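/*
 * Driver-private buffer descriptor (loosely modelled on buffer_head):
 * b_data points at the data, b_size is the total size in bytes, b_count
 * is how many of those bytes are currently valid, and b_reqnext chains
 * the buffers that make up a single pipeline stage.
 */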
177 struct idetape_bh {
178 u32 b_size;
179 atomic_t b_count;
180 struct idetape_bh *b_reqnext;
181 char *b_data;
182 };
183
184 /* Tape door status */
185 #define DOOR_UNLOCKED 0
186 #define DOOR_LOCKED 1
187 #define DOOR_EXPLICITLY_LOCKED 2
188
189 /* Some defines for the SPACE command */
190 #define IDETAPE_SPACE_OVER_FILEMARK 1
191 #define IDETAPE_SPACE_TO_EOD 3
192
193 /* Some defines for the LOAD UNLOAD command */
194 #define IDETAPE_LU_LOAD_MASK 1
195 #define IDETAPE_LU_RETENSION_MASK 2
196 #define IDETAPE_LU_EOT_MASK 4
197
198 /*
199 * Special requests for our block device strategy routine.
200 *
201 * In order to service a character device command, we add special requests to
202 * the tail of our block device request queue and wait for their completion.
203 */
204
205 enum {
206 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
207 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
208 REQ_IDETAPE_READ = (1 << 2),
209 REQ_IDETAPE_WRITE = (1 << 3),
210 };
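/*
 * A sketch of how such a special request is built (this is what
 * idetape_queue_pc_head() below does; the character device layer queues
 * equivalent requests at the tail and waits for their completion):
 *
 *	idetape_init_rq(rq, REQ_IDETAPE_PC1);
 *	rq->buffer = (char *)pc;
 *	rq->rq_disk = tape->disk;
 *	(void)ide_do_drive_cmd(drive, rq, ide_preempt);
 */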
211
212 /* Error codes returned in rq->errors to the higher part of the driver. */
213 #define IDETAPE_ERROR_GENERAL 101
214 #define IDETAPE_ERROR_FILEMARK 102
215 #define IDETAPE_ERROR_EOD 103
216
217 /* Definitions related to the MODE SELECT / MODE SENSE packet commands. */
218 #define IDETAPE_BLOCK_DESCRIPTOR 0
219 #define IDETAPE_CAPABILITIES_PAGE 0x2a
220
221 /* Tape flag bits values. */
222 enum {
223 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
224 /* cleared (0) when the tape position is unknown */
225 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
226 /* Device already opened */
227 IDETAPE_FLAG_BUSY = (1 << 2),
228 /* Error detected in a pipeline stage */
229 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
230 /* Attempt to auto-detect the current user block size */
231 IDETAPE_FLAG_DETECT_BS = (1 << 4),
232 /* Currently on a filemark */
233 IDETAPE_FLAG_FILEMARK = (1 << 5),
234 /* DRQ interrupt device */
235 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
236 /* pipeline active */
237 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
238 /* 0 = no tape is loaded, so we don't rewind after ejecting */
239 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
240 };
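/*
 * These live in the tape->flags bitmap and are manipulated with the
 * atomic bit helpers, as done throughout this driver, e.g.:
 *
 *	set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
 *	if (test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags))
 *		...
 */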
241
242 /* A pipeline stage. */
243 typedef struct idetape_stage_s {
244 struct request rq; /* The corresponding request */
245 struct idetape_bh *bh; /* The data buffers */
246 struct idetape_stage_s *next; /* Pointer to the next stage */
247 } idetape_stage_t;
248
249 /*
250 * Most of our global data which we need to save even as we leave the driver due
251 * to an interrupt or a timer event is stored in the struct defined below.
252 */
253 typedef struct ide_tape_obj {
254 ide_drive_t *drive;
255 ide_driver_t *driver;
256 struct gendisk *disk;
257 struct kref kref;
258
259 /*
260 * Since a typical character device operation requires more
261 * than one packet command, we provide here enough memory
262 * for the maximum number of interconnected packet commands.
263 * The packet commands are stored in the circular array pc_stack.
264 * pc_stack_index points to the next free entry, and wraps around
265 * to the start when we get to the last array entry.
266 *
267 * pc points to the current processed packet command.
268 *
269 * failed_pc points to the last failed packet command, or contains
270 * NULL if we do not need to retry any packet command. This is
271 * required since an additional packet command is needed before the
272 * retry, to get detailed information on what went wrong.
273 */
274 /* Current packet command */
275 struct ide_atapi_pc *pc;
276 /* Last failed packet command */
277 struct ide_atapi_pc *failed_pc;
278 /* Packet command stack */
279 struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
280 /* Next free packet command storage space */
281 int pc_stack_index;
282 struct request rq_stack[IDETAPE_PC_STACK];
283 /* We implement a circular array */
284 int rq_stack_index;
285
286 /*
287 * DSC polling variables.
288 *
289 * While polling for DSC we use postponed_rq to postpone the current
290 * request so that ide.c will be able to service pending requests on the
291 * other device. Note that at most we will have only one DSC (usually
292 * data transfer) request in the device request queue. Additional
293 * requests can be queued in our internal pipeline, but they will be
294 * visible to ide.c only one at a time.
295 */
296 struct request *postponed_rq;
297 /* The time in which we started polling for DSC */
298 unsigned long dsc_polling_start;
299 /* Timer used to poll for dsc */
300 struct timer_list dsc_timer;
301 /* Read/Write dsc polling frequency */
302 unsigned long best_dsc_rw_freq;
303 unsigned long dsc_poll_freq;
304 unsigned long dsc_timeout;
305
306 /* Read position information */
307 u8 partition;
308 /* Current block */
309 unsigned int first_frame;
310
311 /* Last error information */
312 u8 sense_key, asc, ascq;
313
314 /* Character device operation */
315 unsigned int minor;
316 /* device name */
317 char name[4];
318 /* Current character device data transfer direction */
319 u8 chrdev_dir;
320
321 /* tape block size, usually 512 or 1024 bytes */
322 unsigned short blk_size;
323 int user_bs_factor;
324
325 /* Copy of the tape's Capabilities and Mechanical Page */
326 u8 caps[20];
327
328 /*
329 * Active data transfer request parameters.
330 *
331 * At most, there is only one ide-tape originated data transfer request
332 * in the device request queue. This allows ide.c to easily service
333 * requests from the other device when we postpone our active request.
334 * In the pipelined operation mode, we use our internal pipeline
335 * structure to hold more data requests. The data buffer size is chosen
336 * based on the tape's recommendation.
337 */
338 /* ptr to the request which is waiting in the device request queue */
339 struct request *active_data_rq;
340 /* Data buffer size chosen based on the tape's recommendation */
341 int stage_size;
342 idetape_stage_t *merge_stage;
343 int merge_stage_size;
344 struct idetape_bh *bh;
345 char *b_data;
346 int b_count;
347
348 /*
349 * Pipeline parameters.
350 *
351 * To accomplish non-pipelined mode, we simply set the following
352 * variables to zero (or NULL, where appropriate).
353 */
354 /* Number of currently used stages */
355 int nr_stages;
356 /* Number of pending stages */
357 int nr_pending_stages;
358 /* We will not allocate more than this number of stages */
359 int max_stages, min_pipeline, max_pipeline;
360 /* The first stage which will be removed from the pipeline */
361 idetape_stage_t *first_stage;
362 /* The currently active stage */
363 idetape_stage_t *active_stage;
364 /* Will be serviced after the currently active request */
365 idetape_stage_t *next_stage;
366 /* New requests will be added to the pipeline here */
367 idetape_stage_t *last_stage;
368 /* Optional free stage which we can use */
369 idetape_stage_t *cache_stage;
370 int pages_per_stage;
371 /* Wasted space in each stage */
372 int excess_bh_size;
373
374 /* Status/Action flags: long for set_bit */
375 unsigned long flags;
376 /* protects the ide-tape queue */
377 spinlock_t lock;
378
379 /* Measures average tape speed */
380 unsigned long avg_time;
381 int avg_size;
382 int avg_speed;
383
384 /* the door is currently locked */
385 int door_locked;
386 /* the tape hardware is write protected */
387 char drv_write_prot;
388 /* the tape is write protected (hardware or opened as read-only) */
389 char write_prot;
390
391 /*
392 * Limit the number of times a request can be postponed, to avoid an
393 * infinite postpone deadlock.
394 */
395 int postpone_cnt;
396
397 /*
398 * Measures number of frames:
399 *
400 * 1. written/read to/from the driver pipeline (pipeline_head).
401 * 2. written/read to/from the tape buffers (idetape_bh).
402 * 3. written/read by the tape to/from the media (tape_head).
403 */
404 int pipeline_head;
405 int buffer_head;
406 int tape_head;
407 int last_tape_head;
408
409 /* Speed control at the tape buffers input/output */
410 unsigned long insert_time;
411 int insert_size;
412 int insert_speed;
413 int max_insert_speed;
414 int measure_insert_time;
415
416 /* Speed regulation negative feedback loop */
417 int speed_control;
418 int pipeline_head_speed;
419 int controlled_pipeline_head_speed;
420 int uncontrolled_pipeline_head_speed;
421 int controlled_last_pipeline_head;
422 unsigned long uncontrolled_pipeline_head_time;
423 unsigned long controlled_pipeline_head_time;
424 int controlled_previous_pipeline_head;
425 int uncontrolled_previous_pipeline_head;
426 unsigned long controlled_previous_head_time;
427 unsigned long uncontrolled_previous_head_time;
428 int restart_speed_control_req;
429
430 u32 debug_mask;
431 } idetape_tape_t;
432
433 static DEFINE_MUTEX(idetape_ref_mutex);
434
435 static struct class *idetape_sysfs_class;
436
437 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
438
439 #define ide_tape_g(disk) \
440 container_of((disk)->private_data, struct ide_tape_obj, driver)
441
442 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
443 {
444 struct ide_tape_obj *tape = NULL;
445
446 mutex_lock(&idetape_ref_mutex);
447 tape = ide_tape_g(disk);
448 if (tape)
449 kref_get(&tape->kref);
450 mutex_unlock(&idetape_ref_mutex);
451 return tape;
452 }
453
454 static void ide_tape_release(struct kref *);
455
456 static void ide_tape_put(struct ide_tape_obj *tape)
457 {
458 mutex_lock(&idetape_ref_mutex);
459 kref_put(&tape->kref, ide_tape_release);
460 mutex_unlock(&idetape_ref_mutex);
461 }
462
463 /*
464 * The variables below are used for the character device interface. Additional
465 * state variables are defined in our ide_drive_t structure.
466 */
467 static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
468
469 #define ide_tape_f(file) ((file)->private_data)
470
471 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
472 {
473 struct ide_tape_obj *tape = NULL;
474
475 mutex_lock(&idetape_ref_mutex);
476 tape = idetape_devs[i];
477 if (tape)
478 kref_get(&tape->kref);
479 mutex_unlock(&idetape_ref_mutex);
480 return tape;
481 }
482
483 static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
484 unsigned int bcount)
485 {
486 struct idetape_bh *bh = pc->bh;
487 int count;
488
489 while (bcount) {
490 if (bh == NULL) {
491 printk(KERN_ERR "ide-tape: bh == NULL in "
492 "idetape_input_buffers\n");
493 ide_atapi_discard_data(drive, bcount);
494 return;
495 }
496 count = min(
497 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
498 bcount);
499 HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
500 atomic_read(&bh->b_count), count);
501 bcount -= count;
502 atomic_add(count, &bh->b_count);
503 if (atomic_read(&bh->b_count) == bh->b_size) {
504 bh = bh->b_reqnext;
505 if (bh)
506 atomic_set(&bh->b_count, 0);
507 }
508 }
509 pc->bh = bh;
510 }
511
512 static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
513 unsigned int bcount)
514 {
515 struct idetape_bh *bh = pc->bh;
516 int count;
517
518 while (bcount) {
519 if (bh == NULL) {
520 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
521 __func__);
522 return;
523 }
524 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
525 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
526 bcount -= count;
527 pc->b_data += count;
528 pc->b_count -= count;
529 if (!pc->b_count) {
530 bh = bh->b_reqnext;
531 pc->bh = bh;
532 if (bh) {
533 pc->b_data = bh->b_data;
534 pc->b_count = atomic_read(&bh->b_count);
535 }
536 }
537 }
538 }
539
540 static void idetape_update_buffers(struct ide_atapi_pc *pc)
541 {
542 struct idetape_bh *bh = pc->bh;
543 int count;
544 unsigned int bcount = pc->xferred;
545
546 if (pc->flags & PC_FLAG_WRITING)
547 return;
548 while (bcount) {
549 if (bh == NULL) {
550 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
551 __func__);
552 return;
553 }
554 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
555 atomic_set(&bh->b_count, count);
556 if (atomic_read(&bh->b_count) == bh->b_size)
557 bh = bh->b_reqnext;
558 bcount -= count;
559 }
560 pc->bh = bh;
561 }
562
563 /*
564 * idetape_next_pc_storage returns a pointer to a place in which we can
565 * safely store a packet command, even though we intend to leave the
566 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
567 * commands is allocated at initialization time.
568 */
569 static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
570 {
571 idetape_tape_t *tape = drive->driver_data;
572
573 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
574
575 if (tape->pc_stack_index == IDETAPE_PC_STACK)
576 tape->pc_stack_index = 0;
577 return (&tape->pc_stack[tape->pc_stack_index++]);
578 }
579
580 /*
581 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
582 * Since we queue packet commands in the request queue, we need to
583 * allocate a request, along with the allocation of a packet command.
584 */
585
586 /**************************************************************
587 * *
588 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
589 * followed later on by kfree(). -ml *
590 * *
591 **************************************************************/
592
593 static struct request *idetape_next_rq_storage(ide_drive_t *drive)
594 {
595 idetape_tape_t *tape = drive->driver_data;
596
597 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
598
599 if (tape->rq_stack_index == IDETAPE_PC_STACK)
600 tape->rq_stack_index = 0;
601 return (&tape->rq_stack[tape->rq_stack_index++]);
602 }
603
604 static void idetape_init_pc(struct ide_atapi_pc *pc)
605 {
606 memset(pc->c, 0, 12);
607 pc->retries = 0;
608 pc->flags = 0;
609 pc->req_xfer = 0;
610 pc->buf = pc->pc_buf;
611 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
612 pc->bh = NULL;
613 pc->b_data = NULL;
614 }
615
616 /*
617 * called on each failed packet command retry to analyze the request sense. We
618 * currently do not utilize this information.
619 */
620 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
621 {
622 idetape_tape_t *tape = drive->driver_data;
623 struct ide_atapi_pc *pc = tape->failed_pc;
624
625 tape->sense_key = sense[2] & 0xF;
626 tape->asc = sense[12];
627 tape->ascq = sense[13];
628
629 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
630 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
631
632 /* Correct pc->xferred by asking the tape. */
633 if (pc->flags & PC_FLAG_DMA_ERROR) {
634 pc->xferred = pc->req_xfer -
635 tape->blk_size *
636 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
637 idetape_update_buffers(pc);
638 }
639
640 /*
641 * If error was the result of a zero-length read or write command,
642 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
643 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
644 */
645 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
646 /* length == 0 */
647 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
648 if (tape->sense_key == 5) {
649 /* don't report an error, everything's ok */
650 pc->error = 0;
651 /* don't retry read/write */
652 pc->flags |= PC_FLAG_ABORT;
653 }
654 }
655 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
656 pc->error = IDETAPE_ERROR_FILEMARK;
657 pc->flags |= PC_FLAG_ABORT;
658 }
659 if (pc->c[0] == WRITE_6) {
660 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
661 && tape->asc == 0x0 && tape->ascq == 0x2)) {
662 pc->error = IDETAPE_ERROR_EOD;
663 pc->flags |= PC_FLAG_ABORT;
664 }
665 }
666 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
667 if (tape->sense_key == 8) {
668 pc->error = IDETAPE_ERROR_EOD;
669 pc->flags |= PC_FLAG_ABORT;
670 }
671 if (!(pc->flags & PC_FLAG_ABORT) &&
672 pc->xferred)
673 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
674 }
675 }
676
677 static void idetape_activate_next_stage(ide_drive_t *drive)
678 {
679 idetape_tape_t *tape = drive->driver_data;
680 idetape_stage_t *stage = tape->next_stage;
681 struct request *rq = &stage->rq;
682
683 debug_log(DBG_PROCS, "Enter %s\n", __func__);
684
685 if (stage == NULL) {
686 printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
687 " existing stage\n");
688 return;
689 }
690
691 rq->rq_disk = tape->disk;
692 rq->buffer = NULL;
693 rq->special = (void *)stage->bh;
694 tape->active_data_rq = rq;
695 tape->active_stage = stage;
696 tape->next_stage = stage->next;
697 }
698
699 /* Free a stage along with its related buffers completely. */
700 static void __idetape_kfree_stage(idetape_stage_t *stage)
701 {
702 struct idetape_bh *prev_bh, *bh = stage->bh;
703 int size;
704
705 while (bh != NULL) {
706 if (bh->b_data != NULL) {
707 size = (int) bh->b_size;
708 while (size > 0) {
709 free_page((unsigned long) bh->b_data);
710 size -= PAGE_SIZE;
711 bh->b_data += PAGE_SIZE;
712 }
713 }
714 prev_bh = bh;
715 bh = bh->b_reqnext;
716 kfree(prev_bh);
717 }
718 kfree(stage);
719 }
720
721 static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
722 {
723 __idetape_kfree_stage(stage);
724 }
725
726 /*
727 * Remove tape->first_stage from the pipeline. The caller should avoid race
728 * conditions.
729 */
730 static void idetape_remove_stage_head(ide_drive_t *drive)
731 {
732 idetape_tape_t *tape = drive->driver_data;
733 idetape_stage_t *stage;
734
735 debug_log(DBG_PROCS, "Enter %s\n", __func__);
736
737 if (tape->first_stage == NULL) {
738 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
739 return;
740 }
741 if (tape->active_stage == tape->first_stage) {
742 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
743 "pipeline stage\n");
744 return;
745 }
746 stage = tape->first_stage;
747 tape->first_stage = stage->next;
748 idetape_kfree_stage(tape, stage);
749 tape->nr_stages--;
750 if (tape->first_stage == NULL) {
751 tape->last_stage = NULL;
752 if (tape->next_stage != NULL)
753 printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
754 " NULL\n");
755 if (tape->nr_stages)
756 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
757 "now\n");
758 }
759 }
760
761 /*
762 * This will free all the pipeline stages starting from new_last_stage->next
763 * to the end of the list, and point tape->last_stage to new_last_stage.
764 */
765 static void idetape_abort_pipeline(ide_drive_t *drive,
766 idetape_stage_t *new_last_stage)
767 {
768 idetape_tape_t *tape = drive->driver_data;
769 idetape_stage_t *stage = new_last_stage->next;
770 idetape_stage_t *nstage;
771
772 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
773
774 while (stage) {
775 nstage = stage->next;
776 idetape_kfree_stage(tape, stage);
777 --tape->nr_stages;
778 --tape->nr_pending_stages;
779 stage = nstage;
780 }
781 if (new_last_stage)
782 new_last_stage->next = NULL;
783 tape->last_stage = new_last_stage;
784 tape->next_stage = NULL;
785 }
786
787 /*
788 * Finish servicing a request and insert a pending pipeline request into the
789 * main device queue.
790 */
791 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
792 {
793 struct request *rq = HWGROUP(drive)->rq;
794 idetape_tape_t *tape = drive->driver_data;
795 unsigned long flags;
796 int error;
797 int remove_stage = 0;
798 idetape_stage_t *active_stage;
799
800 debug_log(DBG_PROCS, "Enter %s\n", __func__);
801
802 switch (uptodate) {
803 case 0: error = IDETAPE_ERROR_GENERAL; break;
804 case 1: error = 0; break;
805 default: error = uptodate;
806 }
807 rq->errors = error;
808 if (error)
809 tape->failed_pc = NULL;
810
811 if (!blk_special_request(rq)) {
812 ide_end_request(drive, uptodate, nr_sects);
813 return 0;
814 }
815
816 spin_lock_irqsave(&tape->lock, flags);
817
818 /* The request was a pipelined data transfer request */
819 if (tape->active_data_rq == rq) {
820 active_stage = tape->active_stage;
821 tape->active_stage = NULL;
822 tape->active_data_rq = NULL;
823 tape->nr_pending_stages--;
824 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
825 remove_stage = 1;
826 if (error) {
827 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
828 &tape->flags);
829 if (error == IDETAPE_ERROR_EOD)
830 idetape_abort_pipeline(drive,
831 active_stage);
832 }
833 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
834 if (error == IDETAPE_ERROR_EOD) {
835 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
836 &tape->flags);
837 idetape_abort_pipeline(drive, active_stage);
838 }
839 }
840 if (tape->next_stage != NULL) {
841 idetape_activate_next_stage(drive);
842
843 /* Insert the next request into the request queue. */
844 (void)ide_do_drive_cmd(drive, tape->active_data_rq,
845 ide_end);
846 } else if (!error) {
847 /*
848 * This is a part of the feedback loop which tries to
849 * find the optimum number of stages. We are starting
850 * from a minimum maximum number of stages, and if we
851 * sense that the pipeline is empty, we try to increase
852 * it, until we reach the user compile time memory
853 * limit.
854 */
855 int i = (tape->max_pipeline - tape->min_pipeline) / 10;
856
857 tape->max_stages += max(i, 1);
858 tape->max_stages = max(tape->max_stages,
859 tape->min_pipeline);
860 tape->max_stages = min(tape->max_stages,
861 tape->max_pipeline);
862 }
863 }
864 ide_end_drive_cmd(drive, 0, 0);
865
866 if (remove_stage)
867 idetape_remove_stage_head(drive);
868 if (tape->active_data_rq == NULL)
869 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
870 spin_unlock_irqrestore(&tape->lock, flags);
871 return 0;
872 }
873
874 static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
875 {
876 idetape_tape_t *tape = drive->driver_data;
877
878 debug_log(DBG_PROCS, "Enter %s\n", __func__);
879
880 if (!tape->pc->error) {
881 idetape_analyze_error(drive, tape->pc->buf);
882 idetape_end_request(drive, 1, 0);
883 } else {
884 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
885 "Aborting request!\n");
886 idetape_end_request(drive, 0, 0);
887 }
888 return ide_stopped;
889 }
890
891 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
892 {
893 idetape_init_pc(pc);
894 pc->c[0] = REQUEST_SENSE;
895 pc->c[4] = 20;
896 pc->req_xfer = 20;
897 pc->idetape_callback = &idetape_request_sense_callback;
898 }
899
900 static void idetape_init_rq(struct request *rq, u8 cmd)
901 {
902 memset(rq, 0, sizeof(*rq));
903 rq->cmd_type = REQ_TYPE_SPECIAL;
904 rq->cmd[0] = cmd;
905 }
906
907 /*
908 * Generate a new packet command request in front of the request queue, before
909 * the current request, so that it will be processed immediately, on the next
910 * pass through the driver. The function below is called from the request
911 * handling part of the driver (the "bottom" part). Safe storage for the request
912 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
913 *
914 * Memory for those requests is pre-allocated at initialization time, and is
915 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
916 * the maximum possible number of inter-dependent packet commands.
917 *
918 * The higher level of the driver - the ioctl handler and the character device
919 * handling functions - should queue requests to the lower level part and wait
920 * for their completion using idetape_queue_pc_tail() or idetape_queue_rw_tail().
921 */
922 static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
923 struct request *rq)
924 {
925 struct ide_tape_obj *tape = drive->driver_data;
926
927 idetape_init_rq(rq, REQ_IDETAPE_PC1);
928 rq->buffer = (char *) pc;
929 rq->rq_disk = tape->disk;
930 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
931 }
932
933 /*
934 * idetape_retry_pc is called when an error was detected during the
935 * last packet command. We queue a request sense packet command at
936 * the head of the request list.
937 */
938 static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
939 {
940 idetape_tape_t *tape = drive->driver_data;
941 struct ide_atapi_pc *pc;
942 struct request *rq;
943
944 (void)ide_read_error(drive);
945 pc = idetape_next_pc_storage(drive);
946 rq = idetape_next_rq_storage(drive);
947 idetape_create_request_sense_cmd(pc);
948 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
949 idetape_queue_pc_head(drive, pc, rq);
950 return ide_stopped;
951 }
952
953 /*
954 * Postpone the current request so that ide.c will be able to service requests
955 * from another device on the same hwgroup while we are polling for DSC.
956 */
957 static void idetape_postpone_request(ide_drive_t *drive)
958 {
959 idetape_tape_t *tape = drive->driver_data;
960
961 debug_log(DBG_PROCS, "Enter %s\n", __func__);
962
963 tape->postponed_rq = HWGROUP(drive)->rq;
964 ide_stall_queue(drive, tape->dsc_poll_freq);
965 }
966
967 typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
968
969 /*
970 * This is the usual interrupt handler which will be called during a packet
971 * command. We will transfer some of the data (as requested by the drive) and
972 * will re-point interrupt handler to us. When data transfer is finished, we
973 * will act according to the algorithm described before
974 * idetape_issue_pc.
975 */
976 static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
977 {
978 ide_hwif_t *hwif = drive->hwif;
979 idetape_tape_t *tape = drive->driver_data;
980 struct ide_atapi_pc *pc = tape->pc;
981 xfer_func_t *xferfunc;
982 idetape_io_buf *iobuf;
983 unsigned int temp;
984 #if SIMULATE_ERRORS
985 static int error_sim_count;
986 #endif
987 u16 bcount;
988 u8 stat, ireason;
989
990 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
991
992 /* Clear the interrupt */
993 stat = ide_read_status(drive);
994
995 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
996 if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
997 /*
998 * The original reasoning here was that a DMA error is sometimes
999 * expected: if the tape is crossing a filemark during a READ
1000 * command, it will issue an irq and position itself before the
1001 * filemark, so that only a partial data transfer will occur
1002 * (which causes the DMA error). In that case, we will later ask
1003 * the tape how many bytes of the original request were actually
1004 * transferred (we can't receive that information from the DMA
1005 * engine on most chipsets).
1006 *
1007 * On the contrary, a DMA error is never expected; it usually
1008 * indicates a hardware error or abort. If the tape crosses a
1009 * filemark during a READ command, it will issue an irq and
1010 * position itself after the filemark (not before). Only a
1011 * partial data transfer will occur, but no DMA error.
1012 * (AS, 19 Apr 2001)
1013 */
1018 pc->flags |= PC_FLAG_DMA_ERROR;
1019 } else {
1020 pc->xferred = pc->req_xfer;
1021 idetape_update_buffers(pc);
1022 }
1023 debug_log(DBG_PROCS, "DMA finished\n");
1024
1025 }
1026
1027 /* No more interrupts */
1028 if ((stat & DRQ_STAT) == 0) {
1029 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
1030 " transferred\n", pc->xferred);
1031
1032 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1033 local_irq_enable();
1034
1035 #if SIMULATE_ERRORS
1036 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
1037 (++error_sim_count % 100) == 0) {
1038 printk(KERN_INFO "ide-tape: %s: simulating error\n",
1039 tape->name);
1040 stat |= ERR_STAT;
1041 }
1042 #endif
1043 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
1044 stat &= ~ERR_STAT;
1045 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
1046 /* Error detected */
1047 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
1048
1049 if (pc->c[0] == REQUEST_SENSE) {
1050 printk(KERN_ERR "ide-tape: I/O error in request"
1051 " sense command\n");
1052 return ide_do_reset(drive);
1053 }
1054 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
1055 pc->c[0]);
1056
1057 /* Retry operation */
1058 return idetape_retry_pc(drive);
1059 }
1060 pc->error = 0;
1061 if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
1062 (stat & SEEK_STAT) == 0) {
1063 /* Media access command */
1064 tape->dsc_polling_start = jiffies;
1065 tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
1066 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
1067 /* Allow ide.c to handle other requests */
1068 idetape_postpone_request(drive);
1069 return ide_stopped;
1070 }
1071 if (tape->failed_pc == pc)
1072 tape->failed_pc = NULL;
1073 /* Command finished - Call the callback function */
1074 return pc->idetape_callback(drive);
1075 }
1076
1077 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
1078 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1079 printk(KERN_ERR "ide-tape: The tape wants to issue more "
1080 "interrupts in DMA mode\n");
1081 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
1082 ide_dma_off(drive);
1083 return ide_do_reset(drive);
1084 }
1085 /* Get the number of bytes to transfer on this interrupt. */
1086 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
1087 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
1088
1089 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1090
1091 if (ireason & CD) {
1092 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
1093 return ide_do_reset(drive);
1094 }
1095 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
1096 /* Hopefully, we will never get here */
1097 printk(KERN_ERR "ide-tape: We wanted to %s, ",
1098 (ireason & IO) ? "Write" : "Read");
1099 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
1100 (ireason & IO) ? "Read" : "Write");
1101 return ide_do_reset(drive);
1102 }
1103 if (!(pc->flags & PC_FLAG_WRITING)) {
1104 /* Reading - Check that we have enough space */
1105 temp = pc->xferred + bcount;
1106 if (temp > pc->req_xfer) {
1107 if (temp > pc->buf_size) {
1108 printk(KERN_ERR "ide-tape: The tape wants to "
1109 "send us more data than expected "
1110 "- discarding data\n");
1111 ide_atapi_discard_data(drive, bcount);
1112 ide_set_handler(drive, &idetape_pc_intr,
1113 IDETAPE_WAIT_CMD, NULL);
1114 return ide_started;
1115 }
1116 debug_log(DBG_SENSE, "The tape wants to send us more "
1117 "data than expected - allowing transfer\n");
1118 }
1119 iobuf = &idetape_input_buffers;
1120 xferfunc = hwif->atapi_input_bytes;
1121 } else {
1122 iobuf = &idetape_output_buffers;
1123 xferfunc = hwif->atapi_output_bytes;
1124 }
1125
1126 if (pc->bh)
1127 iobuf(drive, pc, bcount);
1128 else
1129 xferfunc(drive, pc->cur_pos, bcount);
1130
1131 /* Update the current position */
1132 pc->xferred += bcount;
1133 pc->cur_pos += bcount;
1134
1135 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
1136 pc->c[0], bcount);
1137
1138 /* And set the interrupt handler again */
1139 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1140 return ide_started;
1141 }
1142
1143 /*
1144 * Packet Command Interface
1145 *
1146 * The current Packet Command is available in tape->pc, and will not change
1147 * until we finish handling it. Each packet command is associated with a
1148 * callback function that will be called when the command is finished.
1149 *
1150 * The handling will be done in several stages:
1151 *
1152 * 1. idetape_issue_pc will send the packet command to the drive, and will set
1153 * the interrupt handler to idetape_pc_intr.
1154 *
1155 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
1156 * repeated until the device signals us that no more interrupts will be issued.
1157 *
1158 * 3. ATAPI tape media access commands return status immediately, while the
1159 * actual operation completes later. After successful initiation of such a command,
1160 * the DSC bit will be set when the actual execution of the command is finished.
1161 * Since the tape drive will not issue an interrupt, we have to poll for this
1162 * event. In this case, we define the request as "low priority request" by
1163 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
1164 * exit the driver.
1165 *
1166 * ide.c will then give higher priority to requests which originate from the
1167 * other device, until we change rq_status back to RQ_ACTIVE.
1168 *
1169 * 4. When the packet command is finished, it will be checked for errors.
1170 *
1171 * 5. In case an error was found, we queue a request sense packet command in
1172 * front of the request queue and retry the operation up to
1173 * IDETAPE_MAX_PC_RETRIES times.
1174 *
1175 * 6. In case no error was found, or we decided to give up and not to retry
1176 * again, the callback function will be called and then we will handle the next
1177 * request.
1178 */
1179 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1180 {
1181 ide_hwif_t *hwif = drive->hwif;
1182 idetape_tape_t *tape = drive->driver_data;
1183 struct ide_atapi_pc *pc = tape->pc;
1184 int retries = 100;
1185 ide_startstop_t startstop;
1186 u8 ireason;
1187
1188 if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
1189 printk(KERN_ERR "ide-tape: Strange, packet command initiated "
1190 "yet DRQ isn't asserted\n");
1191 return startstop;
1192 }
1193 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1194 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1195 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1196 "a packet command, retrying\n");
1197 udelay(100);
1198 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1199 if (retries == 0) {
1200 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
1201 "issuing a packet command, ignoring\n");
1202 ireason |= CD;
1203 ireason &= ~IO;
1204 }
1205 }
1206 if ((ireason & CD) == 0 || (ireason & IO)) {
1207 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1208 "a packet command\n");
1209 return ide_do_reset(drive);
1210 }
1211 /* Set the interrupt routine */
1212 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1213 #ifdef CONFIG_BLK_DEV_IDEDMA
1214 /* Begin DMA, if necessary */
1215 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1216 hwif->dma_start(drive);
1217 #endif
1218 /* Send the actual packet */
1219 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
1220 return ide_started;
1221 }
1222
1223 static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1224 struct ide_atapi_pc *pc)
1225 {
1226 ide_hwif_t *hwif = drive->hwif;
1227 idetape_tape_t *tape = drive->driver_data;
1228 int dma_ok = 0;
1229 u16 bcount;
1230
1231 if (tape->pc->c[0] == REQUEST_SENSE &&
1232 pc->c[0] == REQUEST_SENSE) {
1233 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1234 "Two request sense in serial were issued\n");
1235 }
1236
1237 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1238 tape->failed_pc = pc;
1239 /* Set the current packet command */
1240 tape->pc = pc;
1241
1242 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1243 (pc->flags & PC_FLAG_ABORT)) {
1244 /*
1245 * We will "abort" retrying a packet command in case a legitimate
1246 * error code was received (crossing a filemark, or end of the
1247 * media, for example).
1248 */
1249 if (!(pc->flags & PC_FLAG_ABORT)) {
1250 if (!(pc->c[0] == TEST_UNIT_READY &&
1251 tape->sense_key == 2 && tape->asc == 4 &&
1252 (tape->ascq == 1 || tape->ascq == 8))) {
1253 printk(KERN_ERR "ide-tape: %s: I/O error, "
1254 "pc = %2x, key = %2x, "
1255 "asc = %2x, ascq = %2x\n",
1256 tape->name, pc->c[0],
1257 tape->sense_key, tape->asc,
1258 tape->ascq);
1259 }
1260 /* Giving up */
1261 pc->error = IDETAPE_ERROR_GENERAL;
1262 }
1263 tape->failed_pc = NULL;
1264 return pc->idetape_callback(drive);
1265 }
1266 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1267
1268 pc->retries++;
1269 /* We haven't transferred any data yet */
1270 pc->xferred = 0;
1271 pc->cur_pos = pc->buf;
1272 /* Request to transfer the entire buffer at once */
1273 bcount = pc->req_xfer;
1274
1275 if (pc->flags & PC_FLAG_DMA_ERROR) {
1276 pc->flags &= ~PC_FLAG_DMA_ERROR;
1277 printk(KERN_WARNING "ide-tape: DMA disabled, "
1278 "reverting to PIO\n");
1279 ide_dma_off(drive);
1280 }
1281 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1282 dma_ok = !hwif->dma_setup(drive);
1283
1284 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1285 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1286
1287 if (dma_ok)
1288 /* Will begin DMA later */
1289 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
1290 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
1291 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1292 IDETAPE_WAIT_CMD, NULL);
1293 return ide_started;
1294 } else {
1295 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
1296 return idetape_transfer_pc(drive);
1297 }
1298 }
1299
1300 static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
1301 {
1302 idetape_tape_t *tape = drive->driver_data;
1303
1304 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1305
1306 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1307 return ide_stopped;
1308 }
1309
1310 /* A mode sense command is used to "sense" tape parameters. */
1311 static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1312 {
1313 idetape_init_pc(pc);
1314 pc->c[0] = MODE_SENSE;
1315 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1316 /* DBD = 1 - Don't return block descriptors */
1317 pc->c[1] = 8;
1318 pc->c[2] = page_code;
1319 /*
1320 * Changed pc->c[3] to 0 (255 will at best return unused info).
1321 *
1322 * For SCSI this byte is defined as subpage instead of high byte
1323 * of length and some IDE drives seem to interpret it this way
1324 * and return an error when 255 is used.
1325 */
1326 pc->c[3] = 0;
1327 /* We will just discard data in that case */
1328 pc->c[4] = 255;
1329 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1330 pc->req_xfer = 12;
1331 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1332 pc->req_xfer = 24;
1333 else
1334 pc->req_xfer = 50;
1335 pc->idetape_callback = &idetape_pc_callback;
1336 }
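/*
 * A minimal usage sketch; the blocking submit helper, idetape_queue_pc_tail()
 * (mentioned above and defined further down in this file), issues the command
 * and waits for its completion:
 *
 *	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
 *	if (idetape_queue_pc_tail(drive, &pc))
 *		return;		(could not read the capabilities page)
 */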
1337
1338 static void idetape_calculate_speeds(ide_drive_t *drive)
1339 {
1340 idetape_tape_t *tape = drive->driver_data;
1341
1342 if (time_after(jiffies,
1343 tape->controlled_pipeline_head_time + 120 * HZ)) {
1344 tape->controlled_previous_pipeline_head =
1345 tape->controlled_last_pipeline_head;
1346 tape->controlled_previous_head_time =
1347 tape->controlled_pipeline_head_time;
1348 tape->controlled_last_pipeline_head = tape->pipeline_head;
1349 tape->controlled_pipeline_head_time = jiffies;
1350 }
1351 if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
1352 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1353 tape->controlled_last_pipeline_head) * 32 * HZ /
1354 (jiffies - tape->controlled_pipeline_head_time);
1355 else if (time_after(jiffies, tape->controlled_previous_head_time))
1356 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1357 tape->controlled_previous_pipeline_head) * 32 *
1358 HZ / (jiffies - tape->controlled_previous_head_time);
1359
1360 if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
1361 /* -1 for read mode error recovery */
1362 if (time_after(jiffies, tape->uncontrolled_previous_head_time +
1363 10 * HZ)) {
1364 tape->uncontrolled_pipeline_head_time = jiffies;
1365 tape->uncontrolled_pipeline_head_speed =
1366 (tape->pipeline_head -
1367 tape->uncontrolled_previous_pipeline_head) *
1368 32 * HZ / (jiffies -
1369 tape->uncontrolled_previous_head_time);
1370 }
1371 } else {
1372 tape->uncontrolled_previous_head_time = jiffies;
1373 tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
1374 if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
1375 30 * HZ))
1376 tape->uncontrolled_pipeline_head_time = jiffies;
1377
1378 }
1379 tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
1380 tape->controlled_pipeline_head_speed);
1381
1382 if (tape->speed_control == 1) {
1383 if (tape->nr_pending_stages >= tape->max_stages / 2)
1384 tape->max_insert_speed = tape->pipeline_head_speed +
1385 (1100 - tape->pipeline_head_speed) * 2 *
1386 (tape->nr_pending_stages - tape->max_stages / 2)
1387 / tape->max_stages;
1388 else
1389 tape->max_insert_speed = 500 +
1390 (tape->pipeline_head_speed - 500) * 2 *
1391 tape->nr_pending_stages / tape->max_stages;
1392
1393 if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
1394 tape->max_insert_speed = 5000;
1395 } else
1396 tape->max_insert_speed = tape->speed_control;
1397
1398 tape->max_insert_speed = max(tape->max_insert_speed, 500);
1399 }
1400
1401 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1402 {
1403 idetape_tape_t *tape = drive->driver_data;
1404 struct ide_atapi_pc *pc = tape->pc;
1405 u8 stat;
1406
1407 stat = ide_read_status(drive);
1408
1409 if (stat & SEEK_STAT) {
1410 if (stat & ERR_STAT) {
1411 /* Error detected */
1412 if (pc->c[0] != TEST_UNIT_READY)
1413 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1414 tape->name);
1415 /* Retry operation */
1416 return idetape_retry_pc(drive);
1417 }
1418 pc->error = 0;
1419 if (tape->failed_pc == pc)
1420 tape->failed_pc = NULL;
1421 } else {
1422 pc->error = IDETAPE_ERROR_GENERAL;
1423 tape->failed_pc = NULL;
1424 }
1425 return pc->idetape_callback(drive);
1426 }
1427
1428 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1429 {
1430 idetape_tape_t *tape = drive->driver_data;
1431 struct request *rq = HWGROUP(drive)->rq;
1432 int blocks = tape->pc->xferred / tape->blk_size;
1433
1434 tape->avg_size += blocks * tape->blk_size;
1435 tape->insert_size += blocks * tape->blk_size;
1436 if (tape->insert_size > 1024 * 1024)
1437 tape->measure_insert_time = 1;
1438 if (tape->measure_insert_time) {
1439 tape->measure_insert_time = 0;
1440 tape->insert_time = jiffies;
1441 tape->insert_size = 0;
1442 }
1443 if (time_after(jiffies, tape->insert_time))
1444 tape->insert_speed = tape->insert_size / 1024 * HZ /
1445 (jiffies - tape->insert_time);
1446 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1447 tape->avg_speed = tape->avg_size * HZ /
1448 (jiffies - tape->avg_time) / 1024;
1449 tape->avg_size = 0;
1450 tape->avg_time = jiffies;
1451 }
1452 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1453
1454 tape->first_frame += blocks;
1455 rq->current_nr_sectors -= blocks;
1456
1457 if (!tape->pc->error)
1458 idetape_end_request(drive, 1, 0);
1459 else
1460 idetape_end_request(drive, tape->pc->error, 0);
1461 return ide_stopped;
1462 }
1463
1464 static void idetape_create_read_cmd(idetape_tape_t *tape,
1465 struct ide_atapi_pc *pc,
1466 unsigned int length, struct idetape_bh *bh)
1467 {
1468 idetape_init_pc(pc);
1469 pc->c[0] = READ_6;
1470 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1471 pc->c[1] = 1;
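/*
 * Note: put_unaligned() above stored the big-endian block count in
 * c[1]..c[4]; overwriting c[1] with 1 leaves the 24-bit count in
 * c[2]..c[4] and sets the "fixed block" bit in byte 1, matching the
 * sequential-access READ(6)/WRITE(6) CDB layout. The same trick is
 * used in idetape_create_write_cmd() below.
 */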
1472 pc->idetape_callback = &idetape_rw_callback;
1473 pc->bh = bh;
1474 atomic_set(&bh->b_count, 0);
1475 pc->buf = NULL;
1476 pc->buf_size = length * tape->blk_size;
1477 pc->req_xfer = pc->buf_size;
1478 if (pc->req_xfer == tape->stage_size)
1479 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1480 }
1481
1482 static void idetape_create_write_cmd(idetape_tape_t *tape,
1483 struct ide_atapi_pc *pc,
1484 unsigned int length, struct idetape_bh *bh)
1485 {
1486 idetape_init_pc(pc);
1487 pc->c[0] = WRITE_6;
1488 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1489 pc->c[1] = 1;
1490 pc->idetape_callback = &idetape_rw_callback;
1491 pc->flags |= PC_FLAG_WRITING;
1492 pc->bh = bh;
1493 pc->b_data = bh->b_data;
1494 pc->b_count = atomic_read(&bh->b_count);
1495 pc->buf = NULL;
1496 pc->buf_size = length * tape->blk_size;
1497 pc->req_xfer = pc->buf_size;
1498 if (pc->req_xfer == tape->stage_size)
1499 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1500 }
1501
1502 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1503 struct request *rq, sector_t block)
1504 {
1505 idetape_tape_t *tape = drive->driver_data;
1506 struct ide_atapi_pc *pc = NULL;
1507 struct request *postponed_rq = tape->postponed_rq;
1508 u8 stat;
1509
1510 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1511 " current_nr_sectors: %d\n",
1512 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
1513
1514 if (!blk_special_request(rq)) {
1515 /* We do not support buffer cache originated requests. */
1516 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1517 "request queue (%d)\n", drive->name, rq->cmd_type);
1518 ide_end_request(drive, 0, 0);
1519 return ide_stopped;
1520 }
1521
1522 /* Retry a failed packet command */
1523 if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
1524 return idetape_issue_pc(drive, tape->failed_pc);
1525
1526 if (postponed_rq != NULL)
1527 if (rq != postponed_rq) {
1528 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1529 "Two DSC requests were queued\n");
1530 idetape_end_request(drive, 0, 0);
1531 return ide_stopped;
1532 }
1533
1534 tape->postponed_rq = NULL;
1535
1536 /*
1537 * If the tape is still busy, postpone our request and service
1538 * the other device meanwhile.
1539 */
1540 stat = ide_read_status(drive);
1541
1542 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1543 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1544
1545 if (drive->post_reset == 1) {
1546 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1547 drive->post_reset = 0;
1548 }
1549
1550 if (time_after(jiffies, tape->insert_time))
1551 tape->insert_speed = tape->insert_size / 1024 * HZ /
1552 (jiffies - tape->insert_time);
1553 idetape_calculate_speeds(drive);
1554 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1555 (stat & SEEK_STAT) == 0) {
1556 if (postponed_rq == NULL) {
1557 tape->dsc_polling_start = jiffies;
1558 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
1559 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1560 } else if (time_after(jiffies, tape->dsc_timeout)) {
1561 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1562 tape->name);
1563 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1564 idetape_media_access_finished(drive);
1565 return ide_stopped;
1566 } else {
1567 return ide_do_reset(drive);
1568 }
1569 } else if (time_after(jiffies,
1570 tape->dsc_polling_start +
1571 IDETAPE_DSC_MA_THRESHOLD))
1572 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
1573 idetape_postpone_request(drive);
1574 return ide_stopped;
1575 }
1576 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1577 tape->buffer_head++;
1578 tape->postpone_cnt = 0;
1579 pc = idetape_next_pc_storage(drive);
1580 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1581 (struct idetape_bh *)rq->special);
1582 goto out;
1583 }
1584 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1585 tape->buffer_head++;
1586 tape->postpone_cnt = 0;
1587 pc = idetape_next_pc_storage(drive);
1588 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1589 (struct idetape_bh *)rq->special);
1590 goto out;
1591 }
1592 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1593 pc = (struct ide_atapi_pc *) rq->buffer;
1594 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1595 rq->cmd[0] |= REQ_IDETAPE_PC2;
1596 goto out;
1597 }
1598 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1599 idetape_media_access_finished(drive);
1600 return ide_stopped;
1601 }
1602 BUG();
1603 out:
1604 return idetape_issue_pc(drive, pc);
1605 }
1606
1607 /* Pipeline related functions */
1608
1609 /*
1610 * The function below uses __get_free_page to allocate a pipeline stage, along
1611 * with all the necessary small buffers which together make a buffer of size
1612 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1613 * much as possible.
1614 *
1615 * It returns a pointer to the new allocated stage, or NULL if we can't (or
1616 * don't want to) allocate a stage.
1617 *
1618 * Pipeline stages are optional and are used to increase performance. If we
1619 * can't allocate them, we'll manage without them.
1620 */
1621 static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
1622 int clear)
1623 {
1624 idetape_stage_t *stage;
1625 struct idetape_bh *prev_bh, *bh;
1626 int pages = tape->pages_per_stage;
1627 char *b_data = NULL;
1628
1629 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
1630 if (!stage)
1631 return NULL;
1632 stage->next = NULL;
1633
1634 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1635 bh = stage->bh;
1636 if (bh == NULL)
1637 goto abort;
1638 bh->b_reqnext = NULL;
1639 bh->b_data = (char *) __get_free_page(GFP_KERNEL);
1640 if (!bh->b_data)
1641 goto abort;
1642 if (clear)
1643 memset(bh->b_data, 0, PAGE_SIZE);
1644 bh->b_size = PAGE_SIZE;
1645 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1646
1647 while (--pages) {
1648 b_data = (char *) __get_free_page(GFP_KERNEL);
1649 if (!b_data)
1650 goto abort;
1651 if (clear)
1652 memset(b_data, 0, PAGE_SIZE);
1653 if (bh->b_data == b_data + PAGE_SIZE) {
1654 bh->b_size += PAGE_SIZE;
1655 bh->b_data -= PAGE_SIZE;
1656 if (full)
1657 atomic_add(PAGE_SIZE, &bh->b_count);
1658 continue;
1659 }
1660 if (b_data == bh->b_data + bh->b_size) {
1661 bh->b_size += PAGE_SIZE;
1662 if (full)
1663 atomic_add(PAGE_SIZE, &bh->b_count);
1664 continue;
1665 }
1666 prev_bh = bh;
1667 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1668 if (!bh) {
1669 free_page((unsigned long) b_data);
1670 goto abort;
1671 }
1672 bh->b_reqnext = NULL;
1673 bh->b_data = b_data;
1674 bh->b_size = PAGE_SIZE;
1675 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1676 prev_bh->b_reqnext = bh;
1677 }
1678 bh->b_size -= tape->excess_bh_size;
1679 if (full)
1680 atomic_sub(tape->excess_bh_size, &bh->b_count);
1681 return stage;
1682 abort:
1683 __idetape_kfree_stage(stage);
1684 return NULL;
1685 }
1686
1687 static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
1688 {
1689 idetape_stage_t *cache_stage = tape->cache_stage;
1690
1691 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1692
1693 if (tape->nr_stages >= tape->max_stages)
1694 return NULL;
1695 if (cache_stage != NULL) {
1696 tape->cache_stage = NULL;
1697 return cache_stage;
1698 }
1699 return __idetape_kmalloc_stage(tape, 0, 0);
1700 }
1701
1702 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1703 idetape_stage_t *stage, const char __user *buf, int n)
1704 {
1705 struct idetape_bh *bh = tape->bh;
1706 int count;
1707 int ret = 0;
1708
1709 while (n) {
1710 if (bh == NULL) {
1711 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1712 __func__);
1713 return 1;
1714 }
1715 count = min((unsigned int)
1716 (bh->b_size - atomic_read(&bh->b_count)),
1717 (unsigned int)n);
1718 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
1719 count))
1720 ret = 1;
1721 n -= count;
1722 atomic_add(count, &bh->b_count);
1723 buf += count;
1724 if (atomic_read(&bh->b_count) == bh->b_size) {
1725 bh = bh->b_reqnext;
1726 if (bh)
1727 atomic_set(&bh->b_count, 0);
1728 }
1729 }
1730 tape->bh = bh;
1731 return ret;
1732 }
1733
1734 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1735 idetape_stage_t *stage, int n)
1736 {
1737 struct idetape_bh *bh = tape->bh;
1738 int count;
1739 int ret = 0;
1740
1741 while (n) {
1742 if (bh == NULL) {
1743 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1744 __func__);
1745 return 1;
1746 }
1747 count = min(tape->b_count, n);
1748 if (copy_to_user(buf, tape->b_data, count))
1749 ret = 1;
1750 n -= count;
1751 tape->b_data += count;
1752 tape->b_count -= count;
1753 buf += count;
1754 if (!tape->b_count) {
1755 bh = bh->b_reqnext;
1756 tape->bh = bh;
1757 if (bh) {
1758 tape->b_data = bh->b_data;
1759 tape->b_count = atomic_read(&bh->b_count);
1760 }
1761 }
1762 }
1763 return ret;
1764 }
1765
1766 static void idetape_init_merge_stage(idetape_tape_t *tape)
1767 {
1768 struct idetape_bh *bh = tape->merge_stage->bh;
1769
1770 tape->bh = bh;
1771 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1772 atomic_set(&bh->b_count, 0);
1773 else {
1774 tape->b_data = bh->b_data;
1775 tape->b_count = atomic_read(&bh->b_count);
1776 }
1777 }
1778
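/*
 * Swap the buffer list of a pipeline stage with the merge stage's list, so
 * that filled buffers change hands without any copying, then reset the
 * merge-stage bookkeeping via idetape_init_merge_stage().
 */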
1779 static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
1780 {
1781 struct idetape_bh *tmp;
1782
1783 tmp = stage->bh;
1784 stage->bh = tape->merge_stage->bh;
1785 tape->merge_stage->bh = tmp;
1786 idetape_init_merge_stage(tape);
1787 }
1788
1789 /* Add a new stage at the end of the pipeline. */
1790 static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
1791 {
1792 idetape_tape_t *tape = drive->driver_data;
1793 unsigned long flags;
1794
1795 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1796
1797 spin_lock_irqsave(&tape->lock, flags);
1798 stage->next = NULL;
1799 if (tape->last_stage != NULL)
1800 tape->last_stage->next = stage;
1801 else
1802 tape->first_stage = tape->next_stage = stage;
1804 tape->last_stage = stage;
1805 if (tape->next_stage == NULL)
1806 tape->next_stage = tape->last_stage;
1807 tape->nr_stages++;
1808 tape->nr_pending_stages++;
1809 spin_unlock_irqrestore(&tape->lock, flags);
1810 }
1811
1812 /* Install a completion in a pending request and sleep until it is serviced. The
1813 * caller should ensure that the request will not be serviced before we install
1814 * the completion (usually by disabling interrupts).
1815 */
1816 static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1817 {
1818 DECLARE_COMPLETION_ONSTACK(wait);
1819 idetape_tape_t *tape = drive->driver_data;
1820
1821 if (rq == NULL || !blk_special_request(rq)) {
1822 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1823 " request\n");
1824 return;
1825 }
1826 rq->end_io_data = &wait;
1827 rq->end_io = blk_end_sync_rq;
1828 spin_unlock_irq(&tape->lock);
1829 wait_for_completion(&wait);
1830 /* The stage and its struct request have been deallocated */
1831 spin_lock_irq(&tape->lock);
1832 }
1833
1834 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1835 {
1836 idetape_tape_t *tape = drive->driver_data;
1837 u8 *readpos = tape->pc->buf;
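/*
 * READ POSITION data: byte 0 carries the BOP/EOP/BPU flags, byte 1 the
 * partition number and bytes 4-7 the first frame (big endian).
 */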
1838
1839 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1840
1841 if (!tape->pc->error) {
1842 debug_log(DBG_SENSE, "BOP - %s\n",
1843 (readpos[0] & 0x80) ? "Yes" : "No");
1844 debug_log(DBG_SENSE, "EOP - %s\n",
1845 (readpos[0] & 0x40) ? "Yes" : "No");
1846
1847 if (readpos[0] & 0x4) {
1848 printk(KERN_INFO "ide-tape: Block location is unknown"
1849 "to the tape\n");
1850 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1851 idetape_end_request(drive, 0, 0);
1852 } else {
1853 debug_log(DBG_SENSE, "Block Location - %u\n",
1854 be32_to_cpu(*(u32 *)&readpos[4]));
1855
1856 tape->partition = readpos[1];
1857 tape->first_frame =
1858 be32_to_cpu(*(u32 *)&readpos[4]);
1859 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1860 idetape_end_request(drive, 1, 0);
1861 }
1862 } else {
1863 idetape_end_request(drive, 0, 0);
1864 }
1865 return ide_stopped;
1866 }
1867
1868 /*
1869 * Write a filemark if write_filemark=1. Flush the device buffers without
1870 * writing a filemark otherwise.
1871 */
1872 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
1873 struct ide_atapi_pc *pc, int write_filemark)
1874 {
1875 idetape_init_pc(pc);
1876 pc->c[0] = WRITE_FILEMARKS;
1877 pc->c[4] = write_filemark;
1878 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1879 pc->idetape_callback = &idetape_pc_callback;
1880 }
1881
1882 static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1883 {
1884 idetape_init_pc(pc);
1885 pc->c[0] = TEST_UNIT_READY;
1886 pc->idetape_callback = &idetape_pc_callback;
1887 }
1888
1889 /*
1890 * We add a special packet command request to the tail of the request queue, and
1891 * wait for it to be serviced. This is not to be called from within the request
1892 * handling part of the driver! Here we allocate data on the stack, and it
1893 * remains valid until the request is finished. This is not the case for the
1894 * bottom part of the driver, where we always leave the functions to wait for
1895 * an interrupt or a timer event.
1896 *
1897 * From the bottom part of the driver, we should allocate safe memory using
1898 * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
1899 * to the request list without waiting for it to be serviced! In that case, we
1900 * usually use idetape_queue_pc_head().
1901 */
1902 static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1903 {
1904 struct ide_tape_obj *tape = drive->driver_data;
1905 struct request rq;
1906
1907 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
1908 rq.buffer = (char *) pc;
1909 rq.rq_disk = tape->disk;
1910 return ide_do_drive_cmd(drive, &rq, ide_wait);
1911 }
1912
1913 static void idetape_create_load_unload_cmd(ide_drive_t *drive,
1914 struct ide_atapi_pc *pc, int cmd)
1915 {
1916 idetape_init_pc(pc);
1917 pc->c[0] = START_STOP;
1918 pc->c[4] = cmd;
1919 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1920 pc->idetape_callback = &idetape_pc_callback;
1921 }
1922
1923 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1924 {
1925 idetape_tape_t *tape = drive->driver_data;
1926 struct ide_atapi_pc pc;
1927 int load_attempted = 0;
1928
1929 /* Wait for the tape to become ready */
1930 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
1931 timeout += jiffies;
1932 while (time_before(jiffies, timeout)) {
1933 idetape_create_test_unit_ready_cmd(&pc);
1934 if (!__idetape_queue_pc_tail(drive, &pc))
1935 return 0;
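/*
 * NOT READY, ASC 0x04: ASCQ 0x02 means an initializing (load) command is
 * required and ASCQ 0x01/0x08 mean the drive is still becoming ready;
 * ASC 0x3A means no medium is present.
 */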
1936 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1937 || (tape->asc == 0x3A)) {
1938 /* no media */
1939 if (load_attempted)
1940 return -ENOMEDIUM;
1941 idetape_create_load_unload_cmd(drive, &pc,
1942 IDETAPE_LU_LOAD_MASK);
1943 __idetape_queue_pc_tail(drive, &pc);
1944 load_attempted = 1;
1945 /* not about to be ready */
1946 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
1947 (tape->ascq == 1 || tape->ascq == 8)))
1948 return -EIO;
1949 msleep(100);
1950 }
1951 return -EIO;
1952 }
1953
1954 static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1955 {
1956 return __idetape_queue_pc_tail(drive, pc);
1957 }
1958
1959 static int idetape_flush_tape_buffers(ide_drive_t *drive)
1960 {
1961 struct ide_atapi_pc pc;
1962 int rc;
1963
1964 idetape_create_write_filemark_cmd(drive, &pc, 0);
1965 rc = idetape_queue_pc_tail(drive, &pc);
1966 if (rc)
1967 return rc;
1968 idetape_wait_ready(drive, 60 * 5 * HZ);
1969 return 0;
1970 }
1971
1972 static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
1973 {
1974 idetape_init_pc(pc);
1975 pc->c[0] = READ_POSITION;
1976 pc->req_xfer = 20;
1977 pc->idetape_callback = &idetape_read_position_callback;
1978 }
1979
1980 static int idetape_read_position(ide_drive_t *drive)
1981 {
1982 idetape_tape_t *tape = drive->driver_data;
1983 struct ide_atapi_pc pc;
1984 int position;
1985
1986 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1987
1988 idetape_create_read_position_cmd(&pc);
1989 if (idetape_queue_pc_tail(drive, &pc))
1990 return -1;
1991 position = tape->first_frame;
1992 return position;
1993 }
1994
1995 static void idetape_create_locate_cmd(ide_drive_t *drive,
1996 struct ide_atapi_pc *pc,
1997 unsigned int block, u8 partition, int skip)
1998 {
1999 idetape_init_pc(pc);
2000 pc->c[0] = POSITION_TO_ELEMENT;
2001 pc->c[1] = 2;
2002 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
2003 pc->c[8] = partition;
2004 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2005 pc->idetape_callback = &idetape_pc_callback;
2006 }
2007
2008 static int idetape_create_prevent_cmd(ide_drive_t *drive,
2009 struct ide_atapi_pc *pc, int prevent)
2010 {
2011 idetape_tape_t *tape = drive->driver_data;
2012
2013 /* device supports locking according to capabilities page */
2014 if (!(tape->caps[6] & 0x01))
2015 return 0;
2016
2017 idetape_init_pc(pc);
2018 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
2019 pc->c[4] = prevent;
2020 pc->idetape_callback = &idetape_pc_callback;
2021 return 1;
2022 }
2023
2024 static int __idetape_discard_read_pipeline(ide_drive_t *drive)
2025 {
2026 idetape_tape_t *tape = drive->driver_data;
2027 unsigned long flags;
2028 int cnt;
2029
2030 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2031 return 0;
2032
2033 /* Remove merge stage. */
2034 cnt = tape->merge_stage_size / tape->blk_size;
2035 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2036 ++cnt; /* Filemarks count as 1 sector */
2037 tape->merge_stage_size = 0;
2038 if (tape->merge_stage != NULL) {
2039 __idetape_kfree_stage(tape->merge_stage);
2040 tape->merge_stage = NULL;
2041 }
2042
2043 /* Clear pipeline flags. */
2044 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2045 tape->chrdev_dir = IDETAPE_DIR_NONE;
2046
2047 /* Remove pipeline stages. */
2048 if (tape->first_stage == NULL)
2049 return 0;
2050
2051 spin_lock_irqsave(&tape->lock, flags);
2052 tape->next_stage = NULL;
2053 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
2054 idetape_wait_for_request(drive, tape->active_data_rq);
2055 spin_unlock_irqrestore(&tape->lock, flags);
2056
2057 while (tape->first_stage != NULL) {
2058 struct request *rq_ptr = &tape->first_stage->rq;
2059
2060 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
2061 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2062 ++cnt;
2063 idetape_remove_stage_head(drive);
2064 }
2065 tape->nr_pending_stages = 0;
2066 tape->max_stages = tape->min_pipeline;
2067 return cnt;
2068 }
2069
2070 /*
2071 * Position the tape to the requested block using the LOCATE packet command.
2072 * A READ POSITION command is then issued to check where we are positioned. Like
2073 * all higher level operations, we queue the commands at the tail of the request
2074 * queue and wait for their completion.
2075 */
2076 static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
2077 u8 partition, int skip)
2078 {
2079 idetape_tape_t *tape = drive->driver_data;
2080 int retval;
2081 struct ide_atapi_pc pc;
2082
2083 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2084 __idetape_discard_read_pipeline(drive);
2085 idetape_wait_ready(drive, 60 * 5 * HZ);
2086 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
2087 retval = idetape_queue_pc_tail(drive, &pc);
2088 if (retval)
2089 return (retval);
2090
2091 idetape_create_read_position_cmd(&pc);
2092 return (idetape_queue_pc_tail(drive, &pc));
2093 }
2094
2095 static void idetape_discard_read_pipeline(ide_drive_t *drive,
2096 int restore_position)
2097 {
2098 idetape_tape_t *tape = drive->driver_data;
2099 int cnt;
2100 int seek, position;
2101
2102 cnt = __idetape_discard_read_pipeline(drive);
2103 if (restore_position) {
2104 position = idetape_read_position(drive);
2105 seek = position > cnt ? position - cnt : 0;
2106 if (idetape_position_tape(drive, seek, 0, 0)) {
2107 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
2108 " discard_pipeline()\n", tape->name);
2109 return;
2110 }
2111 }
2112 }
2113
2114 /*
2115 * Generate a read/write request for the block device interface and wait for it
2116 * to be serviced.
2117 */
2118 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
2119 struct idetape_bh *bh)
2120 {
2121 idetape_tape_t *tape = drive->driver_data;
2122 struct request rq;
2123
2124 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
2125
2126 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2127 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
2128 __func__);
2129 return (0);
2130 }
2131
2132 idetape_init_rq(&rq, cmd);
2133 rq.rq_disk = tape->disk;
2134 rq.special = (void *)bh;
2135 rq.sector = tape->first_frame;
2136 rq.nr_sectors = blocks;
2137 rq.current_nr_sectors = blocks;
2138 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
2139
2140 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
2141 return 0;
2142
2143 if (tape->merge_stage)
2144 idetape_init_merge_stage(tape);
2145 if (rq.errors == IDETAPE_ERROR_GENERAL)
2146 return -EIO;
2147 return (tape->blk_size * (blocks-rq.current_nr_sectors));
2148 }
2149
2150 /* Start servicing the pipeline stages, beginning at tape->next_stage. */
2151 static void idetape_plug_pipeline(ide_drive_t *drive)
2152 {
2153 idetape_tape_t *tape = drive->driver_data;
2154
2155 if (tape->next_stage == NULL)
2156 return;
2157 if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2158 idetape_activate_next_stage(drive);
2159 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
2160 }
2161 }
2162
2163 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
2164 {
2165 idetape_init_pc(pc);
2166 pc->c[0] = INQUIRY;
2167 pc->c[4] = 254;
2168 pc->req_xfer = 254;
2169 pc->idetape_callback = &idetape_pc_callback;
2170 }
2171
2172 static void idetape_create_rewind_cmd(ide_drive_t *drive,
2173 struct ide_atapi_pc *pc)
2174 {
2175 idetape_init_pc(pc);
2176 pc->c[0] = REZERO_UNIT;
2177 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2178 pc->idetape_callback = &idetape_pc_callback;
2179 }
2180
2181 static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
2182 {
2183 idetape_init_pc(pc);
2184 pc->c[0] = ERASE;
2185 pc->c[1] = 1;
2186 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2187 pc->idetape_callback = &idetape_pc_callback;
2188 }
2189
2190 static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
2191 {
2192 idetape_init_pc(pc);
2193 pc->c[0] = SPACE;
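/*
 * SPACE takes a 24-bit count in bytes 2-4: store the 32-bit big-endian count
 * at byte 1, then overwrite byte 1 with the space code, leaving the low three
 * count bytes in place.
 */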
2194 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
2195 pc->c[1] = cmd;
2196 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2197 pc->idetape_callback = &idetape_pc_callback;
2198 }
2199
2200 static void idetape_wait_first_stage(ide_drive_t *drive)
2201 {
2202 idetape_tape_t *tape = drive->driver_data;
2203 unsigned long flags;
2204
2205 if (tape->first_stage == NULL)
2206 return;
2207 spin_lock_irqsave(&tape->lock, flags);
2208 if (tape->active_stage == tape->first_stage)
2209 idetape_wait_for_request(drive, tape->active_data_rq);
2210 spin_unlock_irqrestore(&tape->lock, flags);
2211 }
2212
2213 /*
2214 * Try to add a character device originated write request to our pipeline. In
2215 * case we don't succeed, we revert to non-pipelined operation mode for this
2216 * request. In order to accomplish that, we
2217 *
2218 * 1. Try to allocate a new pipeline stage.
2219 * 2. If we can't, wait for more and more requests to be serviced and try again
2220 * each time.
2221 * 3. If we still can't allocate a stage, fallback to non-pipelined operation
2222 * mode for this request.
2223 */
2224 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
2225 {
2226 idetape_tape_t *tape = drive->driver_data;
2227 idetape_stage_t *new_stage;
2228 unsigned long flags;
2229 struct request *rq;
2230
2231 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2232
2233 /* Attempt to allocate a new stage. Beware possible race conditions. */
2234 while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
2235 spin_lock_irqsave(&tape->lock, flags);
2236 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2237 idetape_wait_for_request(drive, tape->active_data_rq);
2238 spin_unlock_irqrestore(&tape->lock, flags);
2239 } else {
2240 spin_unlock_irqrestore(&tape->lock, flags);
2241 idetape_plug_pipeline(drive);
2242 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2243 &tape->flags))
2244 continue;
2245 /*
2246 * The machine is short on memory. Fallback to non-
2247 * pipelined operation mode for this request.
2248 */
2249 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2250 blocks, tape->merge_stage->bh);
2251 }
2252 }
2253 rq = &new_stage->rq;
2254 idetape_init_rq(rq, REQ_IDETAPE_WRITE);
2255 /* Doesn't actually matter - We always assume sequential access */
2256 rq->sector = tape->first_frame;
2257 rq->current_nr_sectors = blocks;
2258 rq->nr_sectors = blocks;
2259
2260 idetape_switch_buffers(tape, new_stage);
2261 idetape_add_stage_tail(drive, new_stage);
2262 tape->pipeline_head++;
2263 idetape_calculate_speeds(drive);
2264
2265 /*
2266 * Estimate whether the tape has stopped writing by checking if our
2267 * write pipeline is currently empty. If we are not writing anymore,
2268 * wait for the pipeline to be almost completely full (90%) before
2269 * starting to service requests, so that we will be able to keep up with
2270 * the higher speeds of the tape.
2271 */
2272 if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2273 if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
2274 tape->nr_stages >= tape->max_stages -
2275 tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
2276 tape->blk_size) {
2277 tape->measure_insert_time = 1;
2278 tape->insert_time = jiffies;
2279 tape->insert_size = 0;
2280 tape->insert_speed = 0;
2281 idetape_plug_pipeline(drive);
2282 }
2283 }
2284 if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2285 /* Return a deferred error */
2286 return -EIO;
2287 return blocks;
2288 }
2289
2290 /*
2291 * Wait until all pending pipeline requests are serviced. Typically called on
2292 * device close.
2293 */
2294 static void idetape_wait_for_pipeline(ide_drive_t *drive)
2295 {
2296 idetape_tape_t *tape = drive->driver_data;
2297 unsigned long flags;
2298
2299 while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2300 &tape->flags)) {
2301 idetape_plug_pipeline(drive);
2302 spin_lock_irqsave(&tape->lock, flags);
2303 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
2304 idetape_wait_for_request(drive, tape->active_data_rq);
2305 spin_unlock_irqrestore(&tape->lock, flags);
2306 }
2307 }
2308
2309 static void idetape_empty_write_pipeline(ide_drive_t *drive)
2310 {
2311 idetape_tape_t *tape = drive->driver_data;
2312 int blocks, min;
2313 struct idetape_bh *bh;
2314
2315 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2316 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
2317 " but we are not writing.\n");
2318 return;
2319 }
2320 if (tape->merge_stage_size > tape->stage_size) {
2321 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2322 tape->merge_stage_size = tape->stage_size;
2323 }
2324 if (tape->merge_stage_size) {
2325 blocks = tape->merge_stage_size / tape->blk_size;
2326 if (tape->merge_stage_size % tape->blk_size) {
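/*
 * Zero-pad the partially filled last block so that a whole number of
 * blocks can be queued.
 */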
2327 unsigned int i;
2328
2329 blocks++;
2330 i = tape->blk_size - tape->merge_stage_size %
2331 tape->blk_size;
2332 bh = tape->bh->b_reqnext;
2333 while (bh) {
2334 atomic_set(&bh->b_count, 0);
2335 bh = bh->b_reqnext;
2336 }
2337 bh = tape->bh;
2338 while (i) {
2339 if (bh == NULL) {
2340 printk(KERN_INFO "ide-tape: bug,"
2341 " bh NULL\n");
2342 break;
2343 }
2344 min = min(i, (unsigned int)(bh->b_size -
2345 atomic_read(&bh->b_count)));
2346 memset(bh->b_data + atomic_read(&bh->b_count),
2347 0, min);
2348 atomic_add(min, &bh->b_count);
2349 i -= min;
2350 bh = bh->b_reqnext;
2351 }
2352 }
2353 (void) idetape_add_chrdev_write_request(drive, blocks);
2354 tape->merge_stage_size = 0;
2355 }
2356 idetape_wait_for_pipeline(drive);
2357 if (tape->merge_stage != NULL) {
2358 __idetape_kfree_stage(tape->merge_stage);
2359 tape->merge_stage = NULL;
2360 }
2361 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2362 tape->chrdev_dir = IDETAPE_DIR_NONE;
2363
2364 /*
2365 * On the next backup, perform the feedback loop again. (I don't want to
2366 * carry the sensed speed information over between backups, as some systems are
2367 * constantly on, and the system load can be totally different on the
2368 * next backup).
2369 */
2370 tape->max_stages = tape->min_pipeline;
2371 if (tape->first_stage != NULL ||
2372 tape->next_stage != NULL ||
2373 tape->last_stage != NULL ||
2374 tape->nr_stages != 0) {
2375 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2376 "first_stage %p, next_stage %p, "
2377 "last_stage %p, nr_stages %d\n",
2378 tape->first_stage, tape->next_stage,
2379 tape->last_stage, tape->nr_stages);
2380 }
2381 }
2382
2383 static void idetape_restart_speed_control(ide_drive_t *drive)
2384 {
2385 idetape_tape_t *tape = drive->driver_data;
2386
2387 tape->restart_speed_control_req = 0;
2388 tape->pipeline_head = 0;
2389 tape->controlled_last_pipeline_head = 0;
2390 tape->controlled_previous_pipeline_head = 0;
2391 tape->uncontrolled_previous_pipeline_head = 0;
2392 tape->controlled_pipeline_head_speed = 5000;
2393 tape->pipeline_head_speed = 5000;
2394 tape->uncontrolled_pipeline_head_speed = 0;
2395 tape->controlled_pipeline_head_time =
2396 tape->uncontrolled_pipeline_head_time = jiffies;
2397 tape->controlled_previous_head_time =
2398 tape->uncontrolled_previous_head_time = jiffies;
2399 }
2400
2401 static int idetape_init_read(ide_drive_t *drive, int max_stages)
2402 {
2403 idetape_tape_t *tape = drive->driver_data;
2404 idetape_stage_t *new_stage;
2405 struct request rq;
2406 int bytes_read;
2407 u16 blocks = *(u16 *)&tape->caps[12];
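/* caps[12..13]: the drive's continuous transfer limit, in blocks */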
2408
2409 /* Initialize read operation */
2410 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2411 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2412 idetape_empty_write_pipeline(drive);
2413 idetape_flush_tape_buffers(drive);
2414 }
2415 if (tape->merge_stage || tape->merge_stage_size) {
2416 printk(KERN_ERR "ide-tape: merge_stage_size should be"
2417 " 0 now\n");
2418 tape->merge_stage_size = 0;
2419 }
2420 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2421 if (!tape->merge_stage)
2422 return -ENOMEM;
2423 tape->chrdev_dir = IDETAPE_DIR_READ;
2424
2425 /*
2426 * Issue a read 0 command to ensure that DSC handshake is
2427 * switched from completion mode to buffer available mode.
2428 * No point in issuing this if DSC overlap isn't supported, some
2429 * drives (Seagate STT3401A) will return an error.
2430 */
2431 if (drive->dsc_overlap) {
2432 bytes_read = idetape_queue_rw_tail(drive,
2433 REQ_IDETAPE_READ, 0,
2434 tape->merge_stage->bh);
2435 if (bytes_read < 0) {
2436 __idetape_kfree_stage(tape->merge_stage);
2437 tape->merge_stage = NULL;
2438 tape->chrdev_dir = IDETAPE_DIR_NONE;
2439 return bytes_read;
2440 }
2441 }
2442 }
2443 if (tape->restart_speed_control_req)
2444 idetape_restart_speed_control(drive);
2445 idetape_init_rq(&rq, REQ_IDETAPE_READ);
2446 rq.sector = tape->first_frame;
2447 rq.nr_sectors = blocks;
2448 rq.current_nr_sectors = blocks;
2449 if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
2450 tape->nr_stages < max_stages) {
2451 new_stage = idetape_kmalloc_stage(tape);
2452 while (new_stage != NULL) {
2453 new_stage->rq = rq;
2454 idetape_add_stage_tail(drive, new_stage);
2455 if (tape->nr_stages >= max_stages)
2456 break;
2457 new_stage = idetape_kmalloc_stage(tape);
2458 }
2459 }
2460 if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2461 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2462 tape->measure_insert_time = 1;
2463 tape->insert_time = jiffies;
2464 tape->insert_size = 0;
2465 tape->insert_speed = 0;
2466 idetape_plug_pipeline(drive);
2467 }
2468 }
2469 return 0;
2470 }
2471
2472 /*
2473 * Called from idetape_chrdev_read() to service a character device read request
2474 * and add read-ahead requests to our pipeline.
2475 */
2476 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2477 {
2478 idetape_tape_t *tape = drive->driver_data;
2479 unsigned long flags;
2480 struct request *rq_ptr;
2481 int bytes_read;
2482
2483 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2484
2485 /* If we are at a filemark, return a read length of 0 */
2486 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2487 return 0;
2488
2489 /* Wait for the next block to reach the head of the pipeline. */
2490 idetape_init_read(drive, tape->max_stages);
2491 if (tape->first_stage == NULL) {
2492 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2493 return 0;
2494 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2495 tape->merge_stage->bh);
2496 }
2497 idetape_wait_first_stage(drive);
2498 rq_ptr = &tape->first_stage->rq;
2499 bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
2500 rq_ptr->current_nr_sectors);
2501 rq_ptr->nr_sectors = 0;
2502 rq_ptr->current_nr_sectors = 0;
2503
2504 if (rq_ptr->errors == IDETAPE_ERROR_EOD)
2505 return 0;
2506 else {
2507 idetape_switch_buffers(tape, tape->first_stage);
2508 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2509 set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
2510 spin_lock_irqsave(&tape->lock, flags);
2511 idetape_remove_stage_head(drive);
2512 spin_unlock_irqrestore(&tape->lock, flags);
2513 tape->pipeline_head++;
2514 idetape_calculate_speeds(drive);
2515 }
2516 if (bytes_read > blocks * tape->blk_size) {
2517 printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
2518 " than requested\n");
2519 bytes_read = blocks * tape->blk_size;
2520 }
2521 return (bytes_read);
2522 }
2523
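/*
 * Write bcount zero bytes to the tape, one stage-sized chunk at a time
 * (used when closing a write to pad out the last user block).
 */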
2524 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2525 {
2526 idetape_tape_t *tape = drive->driver_data;
2527 struct idetape_bh *bh;
2528 int blocks;
2529
2530 while (bcount) {
2531 unsigned int count;
2532
2533 bh = tape->merge_stage->bh;
2534 count = min(tape->stage_size, bcount);
2535 bcount -= count;
2536 blocks = count / tape->blk_size;
2537 while (count) {
2538 atomic_set(&bh->b_count,
2539 min(count, (unsigned int)bh->b_size));
2540 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2541 count -= atomic_read(&bh->b_count);
2542 bh = bh->b_reqnext;
2543 }
2544 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2545 tape->merge_stage->bh);
2546 }
2547 }
2548
2549 static int idetape_pipeline_size(ide_drive_t *drive)
2550 {
2551 idetape_tape_t *tape = drive->driver_data;
2552 idetape_stage_t *stage;
2553 struct request *rq;
2554 int size = 0;
2555
2556 idetape_wait_for_pipeline(drive);
2557 stage = tape->first_stage;
2558 while (stage != NULL) {
2559 rq = &stage->rq;
2560 size += tape->blk_size * (rq->nr_sectors -
2561 rq->current_nr_sectors);
2562 if (rq->errors == IDETAPE_ERROR_FILEMARK)
2563 size += tape->blk_size;
2564 stage = stage->next;
2565 }
2566 size += tape->merge_stage_size;
2567 return size;
2568 }
2569
2570 /*
2571 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2572 * currently support only one partition.
2573 */
2574 static int idetape_rewind_tape(ide_drive_t *drive)
2575 {
2576 int retval;
2577 struct ide_atapi_pc pc;
2578 idetape_tape_t *tape;
2579 tape = drive->driver_data;
2580
2581 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2582
2583 idetape_create_rewind_cmd(drive, &pc);
2584 retval = idetape_queue_pc_tail(drive, &pc);
2585 if (retval)
2586 return retval;
2587
2588 idetape_create_read_position_cmd(&pc);
2589 retval = idetape_queue_pc_tail(drive, &pc);
2590 if (retval)
2591 return retval;
2592 return 0;
2593 }
2594
2595 /* mtio.h compatible commands should be issued to the chrdev interface. */
2596 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2597 unsigned long arg)
2598 {
2599 idetape_tape_t *tape = drive->driver_data;
2600 void __user *argp = (void __user *)arg;
2601
2602 struct idetape_config {
2603 int dsc_rw_frequency;
2604 int dsc_media_access_frequency;
2605 int nr_stages;
2606 } config;
2607
2608 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2609
2610 switch (cmd) {
2611 case 0x0340:
2612 if (copy_from_user(&config, argp, sizeof(config)))
2613 return -EFAULT;
2614 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2615 tape->max_stages = config.nr_stages;
2616 break;
2617 case 0x0350:
2618 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2619 config.nr_stages = tape->max_stages;
2620 if (copy_to_user(argp, &config, sizeof(config)))
2621 return -EFAULT;
2622 break;
2623 default:
2624 return -EIO;
2625 }
2626 return 0;
2627 }
2628
2629 /*
2630 * The function below is now a bit more complicated than just passing the
2631 * command to the tape since we may have crossed some filemarks during our
2632 * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
2633 * support MTFSFM when the filemark is in our internal pipeline even if the tape
2634 * doesn't support spacing over filemarks in the reverse direction.
2635 */
2636 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2637 int mt_count)
2638 {
2639 idetape_tape_t *tape = drive->driver_data;
2640 struct ide_atapi_pc pc;
2641 unsigned long flags;
2642 int retval, count = 0;
2643 int sprev = !!(tape->caps[4] & 0x20);
2644
2645 if (mt_count == 0)
2646 return 0;
2647 if (MTBSF == mt_op || MTBSFM == mt_op) {
2648 if (!sprev)
2649 return -EIO;
2650 mt_count = -mt_count;
2651 }
2652
2653 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2654 /* It's a read-ahead buffer; scan it for crossed filemarks. */
2655 tape->merge_stage_size = 0;
2656 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2657 ++count;
2658 while (tape->first_stage != NULL) {
2659 if (count == mt_count) {
2660 if (mt_op == MTFSFM)
2661 set_bit(IDETAPE_FLAG_FILEMARK,
2662 &tape->flags);
2663 return 0;
2664 }
2665 spin_lock_irqsave(&tape->lock, flags);
2666 if (tape->first_stage == tape->active_stage) {
2667 /*
2668 * We have reached the active stage in the read
2669 * pipeline. There is no point in allowing the
2670 * drive to continue reading any farther, so we
2671 * stop the pipeline.
2672 *
2673 * This section should be moved to a separate
2674 * subroutine because similar operations are
2675 * done in __idetape_discard_read_pipeline(),
2676 * for example.
2677 */
2678 tape->next_stage = NULL;
2679 spin_unlock_irqrestore(&tape->lock, flags);
2680 idetape_wait_first_stage(drive);
2681 tape->next_stage = tape->first_stage->next;
2682 } else
2683 spin_unlock_irqrestore(&tape->lock, flags);
2684 if (tape->first_stage->rq.errors ==
2685 IDETAPE_ERROR_FILEMARK)
2686 ++count;
2687 idetape_remove_stage_head(drive);
2688 }
2689 idetape_discard_read_pipeline(drive, 0);
2690 }
2691
2692 /*
2693 * The filemark was not found in our internal pipeline; now we can issue
2694 * the space command.
2695 */
2696 switch (mt_op) {
2697 case MTFSF:
2698 case MTBSF:
2699 idetape_create_space_cmd(&pc, mt_count - count,
2700 IDETAPE_SPACE_OVER_FILEMARK);
2701 return idetape_queue_pc_tail(drive, &pc);
2702 case MTFSFM:
2703 case MTBSFM:
2704 if (!sprev)
2705 return -EIO;
2706 retval = idetape_space_over_filemarks(drive, MTFSF,
2707 mt_count - count);
2708 if (retval)
2709 return retval;
2710 count = (MTBSFM == mt_op ? 1 : -1);
2711 return idetape_space_over_filemarks(drive, MTFSF, count);
2712 default:
2713 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2714 mt_op);
2715 return -EIO;
2716 }
2717 }
2718
2719 /*
2720 * Our character device read / write functions.
2721 *
2722 * The tape is optimized to maximize throughput when it is transferring an
2723 * integral number of the "continuous transfer limit", which is a parameter of
2724 * the specific tape (26kB on my particular tape, 32kB for Onstream).
2725 *
2726 * As of version 1.3 of the driver, the character device provides an abstract
2727 * continuous view of the media - any mix of block sizes (even 1 byte) on the
2728 * same backup/restore procedure is supported. The driver will internally
2729 * convert the requests to the recommended transfer unit, so that a mismatch
2730 * between the user's block size and the recommended size only results in
2731 * (slightly) increased driver overhead and no longer hurts performance.
2732 * This is not applicable to Onstream.
2733 */
2734 static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2735 size_t count, loff_t *ppos)
2736 {
2737 struct ide_tape_obj *tape = ide_tape_f(file);
2738 ide_drive_t *drive = tape->drive;
2739 ssize_t bytes_read, temp, actually_read = 0, rc;
2740 ssize_t ret = 0;
2741 u16 ctl = *(u16 *)&tape->caps[12];
2742
2743 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2744
2745 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2746 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
2747 if (count > tape->blk_size &&
2748 (count % tape->blk_size) == 0)
2749 tape->user_bs_factor = count / tape->blk_size;
2750 }
2751 rc = idetape_init_read(drive, tape->max_stages);
2752 if (rc < 0)
2753 return rc;
2754 if (count == 0)
2755 return (0);
2756 if (tape->merge_stage_size) {
2757 actually_read = min((unsigned int)(tape->merge_stage_size),
2758 (unsigned int)count);
2759 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
2760 actually_read))
2761 ret = -EFAULT;
2762 buf += actually_read;
2763 tape->merge_stage_size -= actually_read;
2764 count -= actually_read;
2765 }
2766 while (count >= tape->stage_size) {
2767 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2768 if (bytes_read <= 0)
2769 goto finish;
2770 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
2771 bytes_read))
2772 ret = -EFAULT;
2773 buf += bytes_read;
2774 count -= bytes_read;
2775 actually_read += bytes_read;
2776 }
2777 if (count) {
2778 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2779 if (bytes_read <= 0)
2780 goto finish;
2781 temp = min((unsigned long)count, (unsigned long)bytes_read);
2782 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
2783 temp))
2784 ret = -EFAULT;
2785 actually_read += temp;
2786 tape->merge_stage_size = bytes_read-temp;
2787 }
2788 finish:
2789 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
2790 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2791
2792 idetape_space_over_filemarks(drive, MTFSF, 1);
2793 return 0;
2794 }
2795
2796 return ret ? ret : actually_read;
2797 }
2798
2799 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2800 size_t count, loff_t *ppos)
2801 {
2802 struct ide_tape_obj *tape = ide_tape_f(file);
2803 ide_drive_t *drive = tape->drive;
2804 ssize_t actually_written = 0;
2805 ssize_t ret = 0;
2806 u16 ctl = *(u16 *)&tape->caps[12];
2807
2808 /* The drive is write protected. */
2809 if (tape->write_prot)
2810 return -EACCES;
2811
2812 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2813
2814 /* Initialize write operation */
2815 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2816 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2817 idetape_discard_read_pipeline(drive, 1);
2818 if (tape->merge_stage || tape->merge_stage_size) {
2819 printk(KERN_ERR "ide-tape: merge_stage_size "
2820 "should be 0 now\n");
2821 tape->merge_stage_size = 0;
2822 }
2823 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2824 if (!tape->merge_stage)
2825 return -ENOMEM;
2826 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2827 idetape_init_merge_stage(tape);
2828
2829 /*
2830 * Issue a write 0 command to ensure that DSC handshake is
2831 * switched from completion mode to buffer available mode. No
2832 * point in issuing this if DSC overlap isn't supported, some
2833 * drives (Seagate STT3401A) will return an error.
2834 */
2835 if (drive->dsc_overlap) {
2836 ssize_t retval = idetape_queue_rw_tail(drive,
2837 REQ_IDETAPE_WRITE, 0,
2838 tape->merge_stage->bh);
2839 if (retval < 0) {
2840 __idetape_kfree_stage(tape->merge_stage);
2841 tape->merge_stage = NULL;
2842 tape->chrdev_dir = IDETAPE_DIR_NONE;
2843 return retval;
2844 }
2845 }
2846 }
2847 if (count == 0)
2848 return (0);
2849 if (tape->restart_speed_control_req)
2850 idetape_restart_speed_control(drive);
2851 if (tape->merge_stage_size) {
2852 if (tape->merge_stage_size >= tape->stage_size) {
2853 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2854 tape->merge_stage_size = 0;
2855 }
2856 actually_written = min((unsigned int)
2857 (tape->stage_size - tape->merge_stage_size),
2858 (unsigned int)count);
2859 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2860 actually_written))
2861 ret = -EFAULT;
2862 buf += actually_written;
2863 tape->merge_stage_size += actually_written;
2864 count -= actually_written;
2865
2866 if (tape->merge_stage_size == tape->stage_size) {
2867 ssize_t retval;
2868 tape->merge_stage_size = 0;
2869 retval = idetape_add_chrdev_write_request(drive, ctl);
2870 if (retval <= 0)
2871 return (retval);
2872 }
2873 }
2874 while (count >= tape->stage_size) {
2875 ssize_t retval;
2876 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2877 tape->stage_size))
2878 ret = -EFAULT;
2879 buf += tape->stage_size;
2880 count -= tape->stage_size;
2881 retval = idetape_add_chrdev_write_request(drive, ctl);
2882 actually_written += tape->stage_size;
2883 if (retval <= 0)
2884 return (retval);
2885 }
2886 if (count) {
2887 actually_written += count;
2888 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2889 count))
2890 ret = -EFAULT;
2891 tape->merge_stage_size += count;
2892 }
2893 return ret ? ret : actually_written;
2894 }
2895
2896 static int idetape_write_filemark(ide_drive_t *drive)
2897 {
2898 struct ide_atapi_pc pc;
2899
2900 /* Write a filemark */
2901 idetape_create_write_filemark_cmd(drive, &pc, 1);
2902 if (idetape_queue_pc_tail(drive, &pc)) {
2903 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
2904 return -EIO;
2905 }
2906 return 0;
2907 }
2908
2909 /*
2910 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
2911 * requested.
2912 *
2913 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2914 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2915 * usually not supported (it is supported in the rare case in which we crossed
2916 * the filemark during our read-ahead pipelined operation mode).
2917 *
2918 * The following commands are currently not supported:
2919 *
2920 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
2921 * MT_ST_WRITE_THRESHOLD.
2922 */
2923 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2924 {
2925 idetape_tape_t *tape = drive->driver_data;
2926 struct ide_atapi_pc pc;
2927 int i, retval;
2928
2929 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2930 mt_op, mt_count);
2931
2932 /* Commands which need our pipelined read-ahead stages. */
2933 switch (mt_op) {
2934 case MTFSF:
2935 case MTFSFM:
2936 case MTBSF:
2937 case MTBSFM:
2938 if (!mt_count)
2939 return 0;
2940 return idetape_space_over_filemarks(drive, mt_op, mt_count);
2941 default:
2942 break;
2943 }
2944
2945 switch (mt_op) {
2946 case MTWEOF:
2947 if (tape->write_prot)
2948 return -EACCES;
2949 idetape_discard_read_pipeline(drive, 1);
2950 for (i = 0; i < mt_count; i++) {
2951 retval = idetape_write_filemark(drive);
2952 if (retval)
2953 return retval;
2954 }
2955 return 0;
2956 case MTREW:
2957 idetape_discard_read_pipeline(drive, 0);
2958 if (idetape_rewind_tape(drive))
2959 return -EIO;
2960 return 0;
2961 case MTLOAD:
2962 idetape_discard_read_pipeline(drive, 0);
2963 idetape_create_load_unload_cmd(drive, &pc,
2964 IDETAPE_LU_LOAD_MASK);
2965 return idetape_queue_pc_tail(drive, &pc);
2966 case MTUNLOAD:
2967 case MTOFFL:
2968 /*
2969 * If door is locked, attempt to unlock before
2970 * attempting to eject.
2971 */
2972 if (tape->door_locked) {
2973 if (idetape_create_prevent_cmd(drive, &pc, 0))
2974 if (!idetape_queue_pc_tail(drive, &pc))
2975 tape->door_locked = DOOR_UNLOCKED;
2976 }
2977 idetape_discard_read_pipeline(drive, 0);
2978 idetape_create_load_unload_cmd(drive, &pc,
2979 !IDETAPE_LU_LOAD_MASK);
2980 retval = idetape_queue_pc_tail(drive, &pc);
2981 if (!retval)
2982 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2983 return retval;
2984 case MTNOP:
2985 idetape_discard_read_pipeline(drive, 0);
2986 return idetape_flush_tape_buffers(drive);
2987 case MTRETEN:
2988 idetape_discard_read_pipeline(drive, 0);
2989 idetape_create_load_unload_cmd(drive, &pc,
2990 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2991 return idetape_queue_pc_tail(drive, &pc);
2992 case MTEOM:
2993 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
2994 return idetape_queue_pc_tail(drive, &pc);
2995 case MTERASE:
2996 (void)idetape_rewind_tape(drive);
2997 idetape_create_erase_cmd(&pc);
2998 return idetape_queue_pc_tail(drive, &pc);
2999 case MTSETBLK:
3000 if (mt_count) {
3001 if (mt_count < tape->blk_size ||
3002 mt_count % tape->blk_size)
3003 return -EIO;
3004 tape->user_bs_factor = mt_count / tape->blk_size;
3005 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
3006 } else
3007 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
3008 return 0;
3009 case MTSEEK:
3010 idetape_discard_read_pipeline(drive, 0);
3011 return idetape_position_tape(drive,
3012 mt_count * tape->user_bs_factor, tape->partition, 0);
3013 case MTSETPART:
3014 idetape_discard_read_pipeline(drive, 0);
3015 return idetape_position_tape(drive, 0, mt_count, 0);
3016 case MTFSR:
3017 case MTBSR:
3018 case MTLOCK:
3019 if (!idetape_create_prevent_cmd(drive, &pc, 1))
3020 return 0;
3021 retval = idetape_queue_pc_tail(drive, &pc);
3022 if (retval)
3023 return retval;
3024 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
3025 return 0;
3026 case MTUNLOCK:
3027 if (!idetape_create_prevent_cmd(drive, &pc, 0))
3028 return 0;
3029 retval = idetape_queue_pc_tail(drive, &pc);
3030 if (retval)
3031 return retval;
3032 tape->door_locked = DOOR_UNLOCKED;
3033 return 0;
3034 default:
3035 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
3036 mt_op);
3037 return -EIO;
3038 }
3039 }
3040
3041 /*
3042 * Our character device ioctls. General mtio.h magnetic io commands are
3043 * supported here, and not in the corresponding block interface. Our own
3044 * ide-tape ioctls are supported on both interfaces.
3045 */
3046 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3047 unsigned int cmd, unsigned long arg)
3048 {
3049 struct ide_tape_obj *tape = ide_tape_f(file);
3050 ide_drive_t *drive = tape->drive;
3051 struct mtop mtop;
3052 struct mtget mtget;
3053 struct mtpos mtpos;
3054 int block_offset = 0, position = tape->first_frame;
3055 void __user *argp = (void __user *)arg;
3056
3057 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
3058
3059 tape->restart_speed_control_req = 1;
3060 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
3061 idetape_empty_write_pipeline(drive);
3062 idetape_flush_tape_buffers(drive);
3063 }
3064 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
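/*
 * The position reported by the drive includes blocks still queued in our
 * read pipeline, so compute an offset to report the position as seen by
 * the user.
 */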
3065 block_offset = idetape_pipeline_size(drive) /
3066 (tape->blk_size * tape->user_bs_factor);
3067 position = idetape_read_position(drive);
3068 if (position < 0)
3069 return -EIO;
3070 }
3071 switch (cmd) {
3072 case MTIOCTOP:
3073 if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
3074 return -EFAULT;
3075 return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
3076 case MTIOCGET:
3077 memset(&mtget, 0, sizeof(struct mtget));
3078 mtget.mt_type = MT_ISSCSI2;
3079 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
3080 mtget.mt_dsreg =
3081 ((tape->blk_size * tape->user_bs_factor)
3082 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
3083
3084 if (tape->drv_write_prot)
3085 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
3086
3087 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
3088 return -EFAULT;
3089 return 0;
3090 case MTIOCPOS:
3091 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
3092 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
3093 return -EFAULT;
3094 return 0;
3095 default:
3096 if (tape->chrdev_dir == IDETAPE_DIR_READ)
3097 idetape_discard_read_pipeline(drive, 1);
3098 return idetape_blkdev_ioctl(drive, cmd, arg);
3099 }
3100 }
3101
3102 /*
3103 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
3104 * block size with the reported value.
3105 */
3106 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
3107 {
3108 idetape_tape_t *tape = drive->driver_data;
3109 struct ide_atapi_pc pc;
3110
3111 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
3112 if (idetape_queue_pc_tail(drive, &pc)) {
3113 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
3114 if (tape->blk_size == 0) {
3115 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
3116 "block size, assuming 32k\n");
3117 tape->blk_size = 32768;
3118 }
3119 return;
3120 }
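/*
 * Skip the 4-byte mode parameter header: bytes 5-7 of the block descriptor
 * hold the block length, and bit 7 of the header's device-specific parameter
 * byte is the write-protect flag.
 */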
3121 tape->blk_size = (pc.buf[4 + 5] << 16) +
3122 (pc.buf[4 + 6] << 8) +
3123 pc.buf[4 + 7];
3124 tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
3125 }
3126
3127 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
3128 {
3129 unsigned int minor = iminor(inode), i = minor & ~0xc0;
3130 ide_drive_t *drive;
3131 idetape_tape_t *tape;
3132 struct ide_atapi_pc pc;
3133 int retval;
3134
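/*
 * The high minor bits select the device variant (minor + 128 is the
 * no-rewind node); masking them off yields the drive index.
 */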
3135 if (i >= MAX_HWIFS * MAX_DRIVES)
3136 return -ENXIO;
3137
3138 tape = ide_tape_chrdev_get(i);
3139 if (!tape)
3140 return -ENXIO;
3141
3142 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
3143
3144 /*
3145 * We really want to do nonseekable_open(inode, filp); here, but some
3146 * versions of tar incorrectly call lseek on tapes and bail out if that
3147 * fails. So we disallow pread() and pwrite(), but permit lseeks.
3148 */
3149 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
3150
3151 drive = tape->drive;
3152
3153 filp->private_data = tape;
3154
3155 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
3156 retval = -EBUSY;
3157 goto out_put_tape;
3158 }
3159
3160 retval = idetape_wait_ready(drive, 60 * HZ);
3161 if (retval) {
3162 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
3163 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
3164 goto out_put_tape;
3165 }
3166
3167 idetape_read_position(drive);
3168 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
3169 (void)idetape_rewind_tape(drive);
3170
3171 if (tape->chrdev_dir != IDETAPE_DIR_READ)
3172 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
3173
3174 /* Read block size and write protect status from drive. */
3175 ide_tape_get_bsize_from_bdesc(drive);
3176
3177 /* Set write protect flag if device is opened as read-only. */
3178 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
3179 tape->write_prot = 1;
3180 else
3181 tape->write_prot = tape->drv_write_prot;
3182
3183 /* Make sure drive isn't write protected if user wants to write. */
3184 if (tape->write_prot) {
3185 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
3186 (filp->f_flags & O_ACCMODE) == O_RDWR) {
3187 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
3188 retval = -EROFS;
3189 goto out_put_tape;
3190 }
3191 }
3192
3193 /* Lock the tape drive door so user can't eject. */
3194 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
3195 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
3196 if (!idetape_queue_pc_tail(drive, &pc)) {
3197 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
3198 tape->door_locked = DOOR_LOCKED;
3199 }
3200 }
3201 }
3202 idetape_restart_speed_control(drive);
3203 tape->restart_speed_control_req = 0;
3204 return 0;
3205
3206 out_put_tape:
3207 ide_tape_put(tape);
3208 return retval;
3209 }
3210
3211 static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
3212 {
3213 idetape_tape_t *tape = drive->driver_data;
3214
3215 idetape_empty_write_pipeline(drive);
3216 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
3217 if (tape->merge_stage != NULL) {
3218 idetape_pad_zeros(drive, tape->blk_size *
3219 (tape->user_bs_factor - 1));
3220 __idetape_kfree_stage(tape->merge_stage);
3221 tape->merge_stage = NULL;
3222 }
3223 idetape_write_filemark(drive);
3224 idetape_flush_tape_buffers(drive);
3225 idetape_flush_tape_buffers(drive);
3226 }
3227
3228 static int idetape_chrdev_release(struct inode *inode, struct file *filp)
3229 {
3230 struct ide_tape_obj *tape = ide_tape_f(filp);
3231 ide_drive_t *drive = tape->drive;
3232 struct ide_atapi_pc pc;
3233 unsigned int minor = iminor(inode);
3234
3235 lock_kernel();
3236 tape = drive->driver_data;
3237
3238 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
3239
3240 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
3241 idetape_write_release(drive, minor);
3242 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
3243 if (minor < 128)
3244 idetape_discard_read_pipeline(drive, 1);
3245 else
3246 idetape_wait_for_pipeline(drive);
3247 }
3248 if (tape->cache_stage != NULL) {
3249 __idetape_kfree_stage(tape->cache_stage);
3250 tape->cache_stage = NULL;
3251 }
3252 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
3253 (void) idetape_rewind_tape(drive);
3254 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
3255 if (tape->door_locked == DOOR_LOCKED) {
3256 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
3257 if (!idetape_queue_pc_tail(drive, &pc))
3258 tape->door_locked = DOOR_UNLOCKED;
3259 }
3260 }
3261 }
3262 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
3263 ide_tape_put(tape);
3264 unlock_kernel();
3265 return 0;
3266 }
3267
3268 /*
3269 * check the contents of the ATAPI IDENTIFY command results. We return:
3270 *
3271 * 1 - If the tape can be supported by us, based on the information we have so
3272 * far.
3273 *
3274 * 0 - If this tape drive is not currently supported by us.
3275 */
3276 static int idetape_identify_device(ide_drive_t *drive)
3277 {
3278 u8 gcw[2], protocol, device_type, removable, packet_size;
3279
3280 if (drive->id_read == 0)
3281 return 1;
3282
3283 *((unsigned short *) &gcw) = drive->id->config;
3284
3285 protocol = (gcw[1] & 0xC0) >> 6;
3286 device_type = gcw[1] & 0x1F;
3287 removable = !!(gcw[0] & 0x80);
3288 packet_size = gcw[0] & 0x3;
3289
3290 /* Check that we can support this device */
3291 if (protocol != 2)
3292 printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
3293 protocol);
3294 else if (device_type != 1)
3295 printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
3296 "to tape\n", device_type);
3297 else if (!removable)
3298 printk(KERN_ERR "ide-tape: The removable flag is not set\n");
3299 else if (packet_size != 0) {
3300 printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
3301 " bytes\n", packet_size);
3302 } else
3303 return 1;
3304 return 0;
3305 }
3306
3307 static void idetape_get_inquiry_results(ide_drive_t *drive)
3308 {
3309 idetape_tape_t *tape = drive->driver_data;
3310 struct ide_atapi_pc pc;
3311 char fw_rev[6], vendor_id[10], product_id[18];
3312
3313 idetape_create_inquiry_cmd(&pc);
3314 if (idetape_queue_pc_tail(drive, &pc)) {
3315 printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
3316 tape->name);
3317 return;
3318 }
3319 memcpy(vendor_id, &pc.buf[8], 8);
3320 memcpy(product_id, &pc.buf[16], 16);
3321 memcpy(fw_rev, &pc.buf[32], 4);
3322
3323 ide_fixstring(vendor_id, 10, 0);
3324 ide_fixstring(product_id, 18, 0);
3325 ide_fixstring(fw_rev, 6, 0);
3326
3327 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
3328 drive->name, tape->name, vendor_id, product_id, fw_rev);
3329 }
3330
3331 /*
3332 * Ask the tape about its various parameters. In particular, we will adjust our
3333 * data transfer buffer size to the recommended value as returned by the tape.
3334 */
3335 static void idetape_get_mode_sense_results(ide_drive_t *drive)
3336 {
3337 idetape_tape_t *tape = drive->driver_data;
3338 struct ide_atapi_pc pc;
3339 u8 *caps;
3340 u16 speed, max_speed;
3341
3342 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
3343 if (idetape_queue_pc_tail(drive, &pc)) {
3344 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
3345 " some default values\n");
3346 tape->blk_size = 512;
3347 put_unaligned(52, (u16 *)&tape->caps[12]);
3348 put_unaligned(540, (u16 *)&tape->caps[14]);
3349 put_unaligned(6*52, (u16 *)&tape->caps[16]);
3350 return;
3351 }
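/* Skip the mode parameter header and any block descriptors. */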
3352 caps = pc.buf + 4 + pc.buf[3];
3353
3354 /* convert to host order and save for later use */
3355 speed = be16_to_cpu(*(u16 *)&caps[14]);
3356 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
3357
3358 put_unaligned(max_speed, (u16 *)&caps[8]);
3359 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
3360 put_unaligned(speed, (u16 *)&caps[14]);
3361 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
3362
3363 if (!speed) {
3364 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
3365 "(assuming 650KB/sec)\n", drive->name);
3366 put_unaligned(650, (u16 *)&caps[14]);
3367 }
3368 if (!max_speed) {
3369 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
3370 "(assuming 650KB/sec)\n", drive->name);
3371 put_unaligned(650, (u16 *)&caps[8]);
3372 }
3373
3374 memcpy(&tape->caps, caps, 20);
3375 if (caps[7] & 0x02)
3376 tape->blk_size = 512;
3377 else if (caps[7] & 0x04)
3378 tape->blk_size = 1024;
3379 }
3380
3381 #ifdef CONFIG_IDE_PROC_FS
3382 static void idetape_add_settings(ide_drive_t *drive)
3383 {
3384 idetape_tape_t *tape = drive->driver_data;
3385
3386 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3387 1, 2, (u16 *)&tape->caps[16], NULL);
3388 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
3389 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
3390 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
3391 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
3392 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
3393 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
3394 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
3395 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
3396 NULL);
3397 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
3398 0xffff, tape->stage_size / 1024, 1,
3399 &tape->nr_pending_stages, NULL);
3400 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3401 1, 1, (u16 *)&tape->caps[14], NULL);
3402 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
3403 1024, &tape->stage_size, NULL);
3404 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
3405 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
3406 NULL);
3407 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
3408 1, &drive->dsc_overlap, NULL);
3409 ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
3410 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
3411 NULL);
3412 ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
3413 0, 0xffff, 1, 1,
3414 &tape->uncontrolled_pipeline_head_speed, NULL);
3415 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
3416 1, 1, &tape->avg_speed, NULL);
3417 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
3418 1, &tape->debug_mask, NULL);
3419 }
3420 #else
3421 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3422 #endif
3423
3424 /*
3425 * The function below is called to:
3426 *
3427 * 1. Initialize our various state variables.
3428 * 2. Ask the tape for its capabilities.
3429 * 3. Allocate a buffer which will be used for data transfer. The buffer size
3430 * is chosen based on the recommendation which we received in step 2.
3431 *
3432 * Note that at this point ide.c already assigned us an irq, so that we can
3433 * queue requests here and wait for their completion.
3434 */
3435 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3436 {
3437 unsigned long t1, tmid, tn, t;
3438 int speed;
3439 int stage_size;
3440 u8 gcw[2];
3441 struct sysinfo si;
3442 u16 *ctl = (u16 *)&tape->caps[12];
3443
3444 spin_lock_init(&tape->lock);
3445 drive->dsc_overlap = 1;
3446 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
3447 printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
3448 tape->name);
3449 drive->dsc_overlap = 0;
3450 }
3451 /* Seagate Travan drives do not support DSC overlap. */
3452 if (strstr(drive->id->model, "Seagate STT3401"))
3453 drive->dsc_overlap = 0;
3454 tape->minor = minor;
3455 tape->name[0] = 'h';
3456 tape->name[1] = 't';
3457 tape->name[2] = '0' + minor;
3458 tape->chrdev_dir = IDETAPE_DIR_NONE;
3459 tape->pc = tape->pc_stack;
3460 tape->max_insert_speed = 10000;
3461 tape->speed_control = 1;
3462 *((unsigned short *) &gcw) = drive->id->config;
3463
3464 /* Command packet DRQ type */
3465 if (((gcw[0] & 0x60) >> 5) == 1)
3466 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
3467
3468 tape->min_pipeline = 10;
3469 tape->max_pipeline = 10;
3470 tape->max_stages = 10;
3471
3472 idetape_get_inquiry_results(drive);
3473 idetape_get_mode_sense_results(drive);
3474 ide_tape_get_bsize_from_bdesc(drive);
3475 tape->user_bs_factor = 1;
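	/*
	 * Size one pipeline stage from the drive-reported ctl value,
	 * halving it until the stage size fits in 16 bits.
	 */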
3476 tape->stage_size = *ctl * tape->blk_size;
3477 while (tape->stage_size > 0xffff) {
3478 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
3479 *ctl /= 2;
3480 tape->stage_size = *ctl * tape->blk_size;
3481 }
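	/* Round a stage up to whole pages; note the unused tail of the last page. */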
3482 stage_size = tape->stage_size;
3483 tape->pages_per_stage = stage_size / PAGE_SIZE;
3484 if (stage_size % PAGE_SIZE) {
3485 tape->pages_per_stage++;
3486 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
3487 }
3488
3489 /* Select the "best" DSC read/write polling freq and pipeline size. */
3490 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3491
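	/* Size the pipeline to hold roughly ten seconds of data at full speed (speed is in KB/s). */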
3492 tape->max_stages = speed * 1000 * 10 / tape->stage_size;
3493
3494 /* Limit memory use for pipeline to 10% of physical memory */
3495 si_meminfo(&si);
3496 if (tape->max_stages * tape->stage_size >
3497 si.totalram * si.mem_unit / 10)
3498 tape->max_stages =
3499 si.totalram * si.mem_unit / (10 * tape->stage_size);
3500
3501 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3502 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3503 tape->max_pipeline =
3504 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3505 if (tape->max_stages == 0) {
3506 tape->max_stages = 1;
3507 tape->min_pipeline = 1;
3508 tape->max_pipeline = 1;
3509 }
3510
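	/*
	 * Candidate DSC polling periods, in jiffies: t1 is the time to stream
	 * one stage, tmid roughly the time to fill half of the drive's internal
	 * buffer (caps[16] is in 512-byte units), tn the time to stream
	 * IDETAPE_FIFO_THRESHOLD stages.
	 */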
3511 t1 = (tape->stage_size * HZ) / (speed * 1000);
3512 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3513 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3514
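	/* With a pipeline, poll at the FIFO-threshold rate; otherwise per stage. */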
3515 if (tape->max_stages)
3516 t = tn;
3517 else
3518 t = t1;
3519
3520 /*
3521 * Ensure that the number we got makes sense; limit it within
3522 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
3523 */
3524 tape->best_dsc_rw_freq = max_t(unsigned long,
3525 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3526 IDETAPE_DSC_RW_MIN);
3527 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3528 "%dkB pipeline, %lums tDSC%s\n",
3529 drive->name, tape->name, *(u16 *)&tape->caps[14],
3530 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
3531 tape->stage_size / 1024,
3532 tape->max_stages * tape->stage_size / 1024,
3533 tape->best_dsc_rw_freq * 1000 / HZ,
3534 drive->using_dma ? ", DMA":"");
3535
3536 idetape_add_settings(drive);
3537 }
3538
3539 static void ide_tape_remove(ide_drive_t *drive)
3540 {
3541 idetape_tape_t *tape = drive->driver_data;
3542
3543 ide_proc_unregister_driver(drive, tape->driver);
3544
3545 ide_unregister_region(tape->disk);
3546
3547 ide_tape_put(tape);
3548 }
3549
3550 static void ide_tape_release(struct kref *kref)
3551 {
3552 struct ide_tape_obj *tape = to_ide_tape(kref);
3553 ide_drive_t *drive = tape->drive;
3554 struct gendisk *g = tape->disk;
3555
3556 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
3557
3558 drive->dsc_overlap = 0;
3559 drive->driver_data = NULL;
3560 device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
3561 device_destroy(idetape_sysfs_class,
3562 MKDEV(IDETAPE_MAJOR, tape->minor + 128));
3563 idetape_devs[tape->minor] = NULL;
3564 g->private_data = NULL;
3565 put_disk(g);
3566 kfree(tape);
3567 }
3568
3569 #ifdef CONFIG_IDE_PROC_FS
3570 static int proc_idetape_read_name
3571 (char *page, char **start, off_t off, int count, int *eof, void *data)
3572 {
3573 ide_drive_t *drive = (ide_drive_t *) data;
3574 idetape_tape_t *tape = drive->driver_data;
3575 char *out = page;
3576 int len;
3577
3578 len = sprintf(out, "%s\n", tape->name);
3579 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
3580 }
3581
3582 static ide_proc_entry_t idetape_proc[] = {
3583 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
3584 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
3585 { NULL, 0, NULL, NULL }
3586 };
3587 #endif
3588
3589 static int ide_tape_probe(ide_drive_t *);
3590
3591 static ide_driver_t idetape_driver = {
3592 .gen_driver = {
3593 .owner = THIS_MODULE,
3594 .name = "ide-tape",
3595 .bus = &ide_bus_type,
3596 },
3597 .probe = ide_tape_probe,
3598 .remove = ide_tape_remove,
3599 .version = IDETAPE_VERSION,
3600 .media = ide_tape,
3601 .supports_dsc_overlap = 1,
3602 .do_request = idetape_do_request,
3603 .end_request = idetape_end_request,
3604 .error = __ide_error,
3605 .abort = __ide_abort,
3606 #ifdef CONFIG_IDE_PROC_FS
3607 .proc = idetape_proc,
3608 #endif
3609 };
3610
3611 /* Our character device supporting functions, passed to register_chrdev. */
3612 static const struct file_operations idetape_fops = {
3613 .owner = THIS_MODULE,
3614 .read = idetape_chrdev_read,
3615 .write = idetape_chrdev_write,
3616 .ioctl = idetape_chrdev_ioctl,
3617 .open = idetape_chrdev_open,
3618 .release = idetape_chrdev_release,
3619 };
3620
3621 static int idetape_open(struct inode *inode, struct file *filp)
3622 {
3623 struct gendisk *disk = inode->i_bdev->bd_disk;
3624 struct ide_tape_obj *tape;
3625
3626 tape = ide_tape_get(disk);
3627 if (!tape)
3628 return -ENXIO;
3629
3630 return 0;
3631 }
3632
3633 static int idetape_release(struct inode *inode, struct file *filp)
3634 {
3635 struct gendisk *disk = inode->i_bdev->bd_disk;
3636 struct ide_tape_obj *tape = ide_tape_g(disk);
3637
3638 ide_tape_put(tape);
3639
3640 return 0;
3641 }
3642
3643 static int idetape_ioctl(struct inode *inode, struct file *file,
3644 unsigned int cmd, unsigned long arg)
3645 {
3646 struct block_device *bdev = inode->i_bdev;
3647 struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
3648 ide_drive_t *drive = tape->drive;
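	/*
	 * Let the generic IDE ioctl handler try first; fall back to the
	 * tape-specific block ioctls if it does not recognize the command.
	 */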
3649 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
3650 if (err == -EINVAL)
3651 err = idetape_blkdev_ioctl(drive, cmd, arg);
3652 return err;
3653 }
3654
3655 static struct block_device_operations idetape_block_ops = {
3656 .owner = THIS_MODULE,
3657 .open = idetape_open,
3658 .release = idetape_release,
3659 .ioctl = idetape_ioctl,
3660 };
3661
3662 static int ide_tape_probe(ide_drive_t *drive)
3663 {
3664 idetape_tape_t *tape;
3665 struct gendisk *g;
3666 int minor;
3667
3668 if (!strstr("ide-tape", drive->driver_req))
3669 goto failed;
3670 if (!drive->present)
3671 goto failed;
3672 if (drive->media != ide_tape)
3673 goto failed;
3674 if (!idetape_identify_device(drive)) {
3675 printk(KERN_ERR "ide-tape: %s: not supported by this version of"
3676 " the driver\n", drive->name);
3677 goto failed;
3678 }
3679 if (drive->scsi) {
3680 printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
3681 " emulation.\n", drive->name);
3682 goto failed;
3683 }
3684 tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
3685 if (tape == NULL) {
3686 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
3687 drive->name);
3688 goto failed;
3689 }
3690
3691 g = alloc_disk(1 << PARTN_BITS);
3692 if (!g)
3693 goto out_free_tape;
3694
3695 ide_init_disk(g, drive);
3696
3697 ide_proc_register_driver(drive, &idetape_driver);
3698
3699 kref_init(&tape->kref);
3700
3701 tape->drive = drive;
3702 tape->driver = &idetape_driver;
3703 tape->disk = g;
3704
3705 g->private_data = &tape->driver;
3706
3707 drive->driver_data = tape;
3708
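	/* Find the first free minor in idetape_devs[] and claim it under the reference mutex. */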
3709 mutex_lock(&idetape_ref_mutex);
3710 for (minor = 0; idetape_devs[minor]; minor++)
3711 ;
3712 idetape_devs[minor] = tape;
3713 mutex_unlock(&idetape_ref_mutex);
3714
3715 idetape_setup(drive, tape, minor);
3716
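	/*
	 * Create sysfs device nodes for the rewinding (htN) and non-rewinding
	 * (nhtN, minor + 128) interfaces; udev will typically create
	 * /dev/htN and /dev/nhtN from these.
	 */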
3717 device_create(idetape_sysfs_class, &drive->gendev,
3718 MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
3719 device_create(idetape_sysfs_class, &drive->gendev,
3720 MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
3721
3722 g->fops = &idetape_block_ops;
3723 ide_register_region(g);
3724
3725 return 0;
3726
3727 out_free_tape:
3728 kfree(tape);
3729 failed:
3730 return -ENODEV;
3731 }
3732
3733 static void __exit idetape_exit(void)
3734 {
3735 driver_unregister(&idetape_driver.gen_driver);
3736 class_destroy(idetape_sysfs_class);
3737 unregister_chrdev(IDETAPE_MAJOR, "ht");
3738 }
3739
3740 static int __init idetape_init(void)
3741 {
3742 int error = 1;
3743 idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
3744 if (IS_ERR(idetape_sysfs_class)) {
3745 idetape_sysfs_class = NULL;
3746 printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
3747 error = -EBUSY;
3748 goto out;
3749 }
3750
3751 if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
3752 printk(KERN_ERR "ide-tape: Failed to register chrdev"
3753 " interface\n");
3754 error = -EBUSY;
3755 goto out_free_class;
3756 }
3757
3758 error = driver_register(&idetape_driver.gen_driver);
3759 if (error)
3760 goto out_free_chrdev;
3761
3762 return 0;
3763
3764 out_free_chrdev:
3765 unregister_chrdev(IDETAPE_MAJOR, "ht");
3766 out_free_class:
3767 class_destroy(idetape_sysfs_class);
3768 out:
3769 return error;
3770 }
3771
3772 MODULE_ALIAS("ide:*m-tape*");
3773 module_init(idetape_init);
3774 module_exit(idetape_exit);
3775 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
3776 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3777 MODULE_LICENSE("GPL");