/*
 * arch/x86/kernel/quirks.c
 *
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
        u8 config, rev;
        u16 word;

        /* BIOS may enable hardware IRQ balancing for
         * E7520/E7320/E7525 (revision ID 0x9 and below)
         * based platforms.
         * Disable SW irqbalance/affinity on those platforms.
         */
        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
        if (rev > 0x9)
                return;

        /* enable access to config space */
        pci_read_config_byte(dev, 0xf4, &config);
        pci_write_config_byte(dev, 0xf4, config|0x2);

        /*
         * read xTPR register. We may not have a pci_dev for device 8
         * because it might be hidden until the above write.
         */
        pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

        if (!(word & (1 << 13))) {
                dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
                        "disabling irq balancing and affinity\n");
#ifdef CONFIG_IRQBALANCE
                irqbalance_disable("");
#endif
                noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
#endif
        }

        /* put back the original value for config space */
        if (!(config & 0x2))
                pci_write_config_byte(dev, 0xf4, config);
}
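/*
 * Register the workaround for the affected memory controller hubs.
 * "Final" fixups run once per matching vendor/device pair late in PCI
 * enumeration, after the device has been fully set up.
 */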
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
                        quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;

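/*
 * force_hpet_address records where a quirk managed to force-enable the
 * HPET; the HPET setup code is expected to pick it up later.
 * force_hpet_resume_type remembers which chipset-specific hook
 * force_hpet_resume() must call to redo the enable after suspend/resume.
 */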
static enum {
        NONE_FORCE_HPET_RESUME,
        OLD_ICH_FORCE_HPET_RESUME,
        ICH_FORCE_HPET_RESUME,
        VT8237_FORCE_HPET_RESUME,
        NVIDIA_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

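/*
 * ICH6 and newer hide the HPET configuration behind RCBA (the Root Complex
 * Base Address, LPC bridge config offset 0xF0).  Per the ICH datasheets the
 * HPTC register at RCBA + 0x3404 has bit 7 as the enable bit and bits 1:0
 * selecting one of four fixed decode ranges, i.e.
 *
 *	hpet = 0xFED00000 | ((hptc & 0x3) << 12);
 *
 * so e.g. hptc = 0x81 means the HPET decodes at 0xFED01000.
 */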
static void ich_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address)
                return;

        if (rcba_base == NULL)
                BUG();

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                /* HPET disabled in HPTC. Try to enable it */
                writel(val | 0x80, rcba_base + 0x3404);
        }

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80))
                BUG();
        else
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(rcba);
        int err = 0;

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xF0, &rcba);
        rcba &= 0xFFFFC000;
        if (rcba == 0) {
                dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
                        "cannot force enable HPET\n");
                return;
        }

        /* use bits 31:14, 16 kB aligned */
        rcba_base = ioremap_nocache(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
                return;
        }

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);

        if (val & 0x80) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                iounmap(rcba_base);
                return;
        }

        /* HPET disabled in HPTC. Try to enable it */
        writel(val | 0x80, rcba_base + 0x3404);

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                err = 1;
        } else {
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
        }

        if (err) {
                force_hpet_address = 0;
                iounmap(rcba_base);
                dev_printk(KERN_DEBUG, &dev->dev,
                        "Failed to force enable HPET\n");
        } else {
                force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
        }
}

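/*
 * Header fixups run early, right after the config header of a matching
 * device has been read during enumeration.  ich_force_enable_hpet() bails
 * out if ACPI already reported an HPET (hpet_address is set), so only
 * boards whose BIOS hides the timer are touched.
 */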
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);

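/*
 * The device whose config space was used to force-enable the HPET.  The
 * resume hooks below need it because the BIOS may put the chipset register
 * back to its reset state across suspend/resume.
 */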
static struct pci_dev *cached_dev;

static void old_ich_force_hpet_resume(void)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (!force_hpet_address || !cached_dev)
                return;

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);

        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val == 0x4)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

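/*
 * Older ICH parts (82801CA/DB/EB) keep the HPET enable and address-select
 * bits in GEN_CNTL, config offset 0xD0 of the LPC bridge, rather than
 * behind RCBA.  Bits 17:15 = 1xy means enabled at 0xFED00000 | (xy << 12),
 * e.g. a value of 0b101 puts the timer at 0xFED01000.
 */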
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xD0, &gen_cntl);
        /*
         * Bit 17 is the HPET enable bit.
         * Bits 16:15 control the HPET base address.
         */
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether it sticks.
         */
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);
        pci_write_config_dword(dev, 0xD0, gen_cntl);

        pci_read_config_dword(dev, 0xD0, &gen_cntl);

        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                /* HPET is enabled in GEN_CNTL. Just not reported by BIOS */
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset feature. Make sure the user really asked for this
 * workaround (hpet=force on the command line) before applying it.
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
        if (hpet_force_user)
                old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
                         old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
                         old_ich_force_enable_hpet);

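/*
 * On the VIA VT8235/VT8237 south bridges the HPET configuration lives at
 * config offset 0x68: bit 7 enables the timer and bits 31:10 hold the base
 * address (see the note in vt8237_force_enable_hpet() below).  The resume
 * hook rewrites the 0xFED00000 | enable value the quirk established at boot.
 */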
static void vt8237_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address || !cached_dev)
                return;

        val = 0xfed00000 | 0x80;
        pci_write_config_dword(cached_dev, 0x68, val);

        pci_read_config_dword(cached_dev, 0x68, &val);
        if (val & 0x80)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (!hpet_force_user || hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0x68, &val);
        /*
         * Bit 7 is the HPET enable bit.
         * Bits 31:10 hold the HPET base address (contrary to what the
         * datasheet claims).
         */
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether it sticks.
         */
        val = 0xfed00000 | 0x80;
        pci_write_config_dword(dev, 0x68, val);

        pci_read_config_dword(dev, 0x68, &val);
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
                         vt8237_force_enable_hpet);

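/*
 * nForce LPC/ISA bridges: the quirk writes 0xfed00001 to config offset
 * 0x44.  Judging from the mask applied on read-back, bit 0 appears to be
 * the enable bit and bits 31:1 the HPET base address, so this forces the
 * timer to 0xFED00000.
 */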
/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (!hpet_force_user || hpet_address || force_hpet_address)
                return;

        pci_write_config_dword(dev, 0x44, 0xfed00001);
        pci_read_config_dword(dev, 0x44, &val);
        force_hpet_address = val & 0xfffffffe;
        force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                force_hpet_address);
        cached_dev = dev;
}

/* ISA bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
                         nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
                         nvidia_force_enable_hpet);

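/*
 * Called back from the HPET resume code: whichever quirk managed to force
 * the timer on at boot gets to redo its chipset poking after suspend,
 * since the BIOS will typically have restored the register defaults.
 */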
void force_hpet_resume(void)
{
        switch (force_hpet_resume_type) {
        case ICH_FORCE_HPET_RESUME:
                ich_force_hpet_resume();
                return;
        case OLD_ICH_FORCE_HPET_RESUME:
                old_ich_force_hpet_resume();
                return;
        case VT8237_FORCE_HPET_RESUME:
                vt8237_force_hpet_resume();
                return;
        case NVIDIA_FORCE_HPET_RESUME:
                nvidia_force_hpet_resume();
                return;
        default:
                break;
        }
}

#endif