Pull bugzilla-7200 into release branch
drivers/ata/pata_serverworks.c
/*
 * pata_serverworks.c - Serverworks PATA for new ATA layer
 *			(C) 2005 Red Hat Inc
 *			Alan Cox <alan@redhat.com>
 *
 * based upon
 *
 * serverworks.c
 *
 * Copyright (C) 1998-2000 Michel Aubry
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
 * Portions copyright (c) 2001 Sun Microsystems
 *
 *
 * RCC/ServerWorks IDE driver for Linux
 *
 *   OSB4: `Open South Bridge' IDE Interface (fn 1)
 *         supports UDMA mode 2 (33 MB/s)
 *
 *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
 *         all revisions support UDMA mode 4 (66 MB/s)
 *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
 *
 *         *** The CSB5 does not provide ANY register ***
 *         *** to detect 80-conductor cable presence. ***
 *
 *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
 *
 * Documentation:
 *	Available under NDA only. Errata info very hard to get.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_serverworks"
#define DRV_VERSION "0.3.9"

#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */

/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
 * can overrun their FIFOs when used with the CSB5 */

static const char *csb_bad_ata100[] = {
	"ST320011A",
	"ST340016A",
	"ST360021A",
	"ST380021A",
	NULL
};

/**
 * dell_cable - Dell serverworks cable detection
 * @ap: ATA port to do cable detect
 *
 * Dell hides the 40/80 pin select for their interfaces in the top two
 * bits of the subsystem ID.
 */

static int dell_cable(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

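	/*
	 * Port 0 is flagged by bit 14 and port 1 by bit 15 of the
	 * subsystem ID; a set bit means an 80-wire cable is fitted.
	 */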
	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * sun_cable - Sun Cobalt 'Alpine' cable detection
 * @ap: ATA port to do cable select
 *
 * Cobalt CSB5 IDE hides the 40/80pin in the top two bits of the
 * subsystem ID, the same as Dell. We could use one function but we may
 * need to extend the Dell one in future
 */

static int sun_cable(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * osb4_cable - OSB4 cable detect
 * @ap: ATA port to check
 *
 * The OSB4 isn't UDMA66 capable so this is easy
 */

static int osb4_cable(struct ata_port *ap) {
	return ATA_CBL_PATA40;
}

/**
 * csb_cable - CSB5/6 cable detect
 * @ap: ATA port to check
 *
 * Serverworks default arrangement is to use the drive side detection
 * only.
 */

static int csb_cable(struct ata_port *ap) {
	return ATA_CBL_PATA80;
}

struct sv_cable_table {
	int device;
	int subvendor;
	int (*cable_detect)(struct ata_port *ap);
};

/*
 * Note that we don't copy the old serverworks code because the old
 * code contains obvious mistakes
 */

static struct sv_cable_table cable_detect[] = {
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,  PCI_VENDOR_ID_DELL, dell_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,  PCI_VENDOR_ID_DELL, dell_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,  PCI_VENDOR_ID_SUN,  sun_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_OSB4IDE,  PCI_ANY_ID, osb4_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,  PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,  PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
	{ }
};

/**
 * serverworks_pre_reset - cable detection
 * @ap: ATA port
 *
 * Perform cable detection according to the device and subvendor
 * identifications
 */

static int serverworks_pre_reset(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct sv_cable_table *cb = cable_detect;

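	/*
	 * Walk the table until a device/subvendor pair matches, then let
	 * the board specific helper set ap->cbl before the standard
	 * prereset runs.
	 */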
	while (cb->device) {
		if (cb->device == pdev->device &&
		    (cb->subvendor == pdev->subsystem_vendor ||
		     cb->subvendor == PCI_ANY_ID)) {
			ap->cbl = cb->cable_detect(ap);
			return ata_std_prereset(ap);
		}
		cb++;
	}

	BUG();
	return -1;	/* kill compiler warning */
}

static void serverworks_error_handler(struct ata_port *ap)
{
	return ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}

/**
 * serverworks_is_csb - Check for CSB or OSB
 * @pdev: PCI device to check
 *
 * Returns true if the device being checked is known to be a CSB
 * series device.
 */

static u8 serverworks_is_csb(struct pci_dev *pdev)
{
	switch (pdev->device) {
		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
			return 1;
		default:
			break;
	}
	return 0;
}

/**
 * serverworks_osb4_filter - mode selection filter
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Filter the offered modes for the device to apply controller
 * specific rules. OSB4 requires no UDMA for disks due to a FIFO
 * bug we hit.
 */

static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
	if (adev->class == ATA_DEV_ATA)
		mask &= ~ATA_MASK_UDMA;
	return ata_pci_default_filter(ap, adev, mask);
}


/**
 * serverworks_csb_filter - mode selection filter
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Check the blacklist and disable UDMA5 if matched
 */

static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
	const char *p;
	char model_num[ATA_ID_PROD_LEN + 1];
	int i;

	/* Non-disk devices: just apply the default mode filter */
	if (adev->class != ATA_DEV_ATA)
		return ata_pci_default_filter(ap, adev, mask);

	/* ATA disk: check the model number against the blacklist above */
	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
		if (!strcmp(p, model_num))
			mask &= ~(0xE0 << ATA_SHIFT_UDMA);	/* disable UDMA5 and above */
	}
	return ata_pci_default_filter(ap, adev, mask);
}


/**
 * serverworks_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the OSB4/CSB5 timing registers for PIO. The PIO register
 * load is done as a simple lookup.
 */
static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
	int offset = 1 + (2 * ap->port_no) - adev->devno;
	int devbits = (2 * ap->port_no + adev->devno) * 4;
	u16 csb5_pio;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int pio = adev->pio_mode - XFER_PIO_0;

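	/*
	 * offset picks the per-drive timing byte in the 0x40-0x43 block
	 * (the slave sits at the lower address of each pair); devbits
	 * picks this drive's four-bit mode field in the word at 0x4A.
	 */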
	pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);

	/* The OSB4 just requires the timing but the CSB series want the
	   mode number as well */
	if (serverworks_is_csb(pdev)) {
		pci_read_config_word(pdev, 0x4A, &csb5_pio);
		csb5_pio &= ~(0x0F << devbits);
		/* 0x4A is a 16-bit register, so write the whole word back */
		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
	}
}

/**
 * serverworks_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
 * chipset. The MWDMA mode values are pulled from a lookup table
 * while the chipset uses mode number for UDMA.
 */

static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
	int offset = 1 + 2 * ap->port_no - adev->devno;
	int devbits = (2 * ap->port_no + adev->devno);
	u8 ultra;
	u8 ultra_cfg;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	pci_read_config_byte(pdev, 0x54, &ultra_cfg);

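	/*
	 * 0x54 keeps one UDMA enable bit per drive (devbits), 0x56/0x57
	 * hold a mode nibble per drive for each channel, and 0x44-0x47
	 * carry the MWDMA timing bytes written from dma_mode[] below.
	 */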
	if (adev->dma_mode >= XFER_UDMA_0) {
		pci_write_config_byte(pdev, 0x44 + offset, 0x20);

		pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
		ultra &= ~(0x0F << (adev->devno * 4));
		ultra |= (adev->dma_mode - XFER_UDMA_0)
			<< (adev->devno * 4);
		pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);

		ultra_cfg |= (1 << devbits);
	} else {
		pci_write_config_byte(pdev, 0x44 + offset,
			dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
		ultra_cfg &= ~(1 << devbits);
	}
	pci_write_config_byte(pdev, 0x54, ultra_cfg);
}

static struct scsi_host_template serverworks_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.resume			= ata_scsi_device_resume,
	.suspend		= ata_scsi_device_suspend,
};

static struct ata_port_operations serverworks_osb4_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= serverworks_set_piomode,
	.set_dmamode	= serverworks_set_dmamode,
	.mode_filter	= serverworks_osb4_filter,

	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= serverworks_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations serverworks_csb_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= serverworks_set_piomode,
	.set_dmamode	= serverworks_set_dmamode,
	.mode_filter	= serverworks_csb_filter,

	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= serverworks_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static int serverworks_fixup_osb4(struct pci_dev *pdev)
{
	u32 reg;
	struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
		PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
	if (isa_dev) {
		pci_read_config_dword(isa_dev, 0x64, &reg);
		reg &= ~0x00002000; /* disable 600ns interrupt mask */
		if (!(reg & 0x00004000))
			printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
		reg |= 0x00004000; /* enable UDMA/33 support */
		pci_write_config_dword(isa_dev, 0x64, reg);
		pci_dev_put(isa_dev);
		return 0;
	}
	printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n");
	return -ENODEV;
}

static int serverworks_fixup_csb(struct pci_dev *pdev)
{
	u8 rev;
	u8 btr;

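	/*
	 * The value programmed into the UDMA control register (0x5A) is
	 * also returned; serverworks_init_one() treats a return of 3 as
	 * the newer, UDMA100 capable revision.
	 */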
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);

	/* Third Channel Test */
	if (!(PCI_FUNC(pdev->devfn) & 1)) {
		struct pci_dev *findev = NULL;
		u32 reg4c = 0;
		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
		if (findev) {
			pci_read_config_dword(findev, 0x4C, &reg4c);
			reg4c &= ~0x000007FF;
			reg4c |= 0x00000040;
			reg4c |= 0x00000020;
			pci_write_config_dword(findev, 0x4C, reg4c);
			pci_dev_put(findev);
		}
	} else {
		struct pci_dev *findev = NULL;
		u8 reg41 = 0;

		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
		if (findev) {
			pci_read_config_byte(findev, 0x41, &reg41);
			reg41 &= ~0x40;
			pci_write_config_byte(findev, 0x41, reg41);
			pci_dev_put(findev);
		}
	}
	/* setup the UDMA Control register
	 *
	 * 1. clear bit 6 to enable DMA
	 * 2. enable DMA modes with bits 0-1
	 *	00 : legacy
	 *	01 : udma2
	 *	10 : udma2/udma4
	 *	11 : udma2/udma4/udma5
	 */
	pci_read_config_byte(pdev, 0x5A, &btr);
	btr &= ~0x40;
	if (!(PCI_FUNC(pdev->devfn) & 1))
		btr |= 0x2;
	else
		btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
	pci_write_config_byte(pdev, 0x5A, btr);

	return btr;
}

static void serverworks_fixup_ht1000(struct pci_dev *pdev)
{
	u8 btr;
	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
	pci_read_config_byte(pdev, 0x5A, &btr);
	btr &= ~0x40;
	btr |= 0x3;
	pci_write_config_byte(pdev, 0x5A, btr);
}


static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ports = 2;
	static struct ata_port_info info[4] = {
		{ /* OSB4 */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x07,
			.port_ops = &serverworks_osb4_port_ops
		}, { /* OSB4 no UDMA */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x00,
			.port_ops = &serverworks_osb4_port_ops
		}, { /* CSB5 */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,
			.port_ops = &serverworks_csb_port_ops
		}, { /* CSB5 - later revisions */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,
			.port_ops = &serverworks_csb_port_ops
		}
	};
	static struct ata_port_info *port_info[2];
	struct ata_port_info *devinfo = &info[id->driver_data];

	/* Force master latency timer to 64 PCI clocks */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);

	/* OSB4 : South Bridge and IDE */
	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		/* Select non UDMA capable OSB4 if we can't do fixups */
		if (serverworks_fixup_osb4(pdev) < 0)
			devinfo = &info[1];
	}
	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
	else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {

		/* If the fixup reports the newer (UDMA100 capable)
		   revision, select the faster info block */
		if (serverworks_fixup_csb(pdev) == 3)
			devinfo = &info[3];

		/* Is this the 3rd channel CSB6 IDE ? */
		if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
			ports = 1;
	}
	/* setup HT1000E */
	else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
		serverworks_fixup_ht1000(pdev);

	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
		ata_pci_clear_simplex(pdev);

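	/* Both channels share the same info block; ata_pci_init_one()
	   registers 'ports' of them (one for the CSB6 third channel) */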
	port_info[0] = port_info[1] = devinfo;
	return ata_pci_init_one(pdev, port_info, ports);
}

static int serverworks_reinit_one(struct pci_dev *pdev)
{
	/* Force master latency timer to 64 PCI clocks */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);

	switch (pdev->device)
	{
		case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
			serverworks_fixup_osb4(pdev);
			break;
		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
			ata_pci_clear_simplex(pdev);
			/* fall through */
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
			serverworks_fixup_csb(pdev);
			break;
		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
			serverworks_fixup_ht1000(pdev);
			break;
	}
	return ata_pci_device_resume(pdev);
}

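/* driver_data holds the index into info[] above: 0 selects the OSB4
   entry, 2 the CSB/HT1000 entry (bumped to 3 at probe time for newer
   UDMA100 capable CSB revisions) */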
static const struct pci_device_id serverworks[] = {
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},

	{ },
};

static struct pci_driver serverworks_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= serverworks,
	.probe		= serverworks_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= serverworks_reinit_one,
};

static int __init serverworks_init(void)
{
	return pci_register_driver(&serverworks_pci_driver);
}

static void __exit serverworks_exit(void)
{
	pci_unregister_driver(&serverworks_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, serverworks);
MODULE_VERSION(DRV_VERSION);

module_init(serverworks_init);
module_exit(serverworks_exit);