x86/vsmp: Ignore IOAPIC IRQ affinity if possible
[deliverable/linux.git] / arch / x86 / kernel / vsmp_64.c
CommitLineData
79f12614
RT
1/*
2 * vSMPowered(tm) systems specific initialization
3 * Copyright (C) 2005 ScaleMP Inc.
4 *
5 * Use of this code is subject to the terms and conditions of the
6 * GNU general public license version 2. See "COPYING" or
7 * http://www.gnu.org/licenses/gpl.html
8 *
9 * Ravikiran Thirumalai <kiran@scalemp.com>,
10 * Shai Fultheim <shai@scalemp.com>
96597fd2
GC
11 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
12 * Ravikiran Thirumalai <kiran@scalemp.com>
79f12614
RT
13 */
14
15#include <linux/init.h>
16#include <linux/pci_ids.h>
17#include <linux/pci_regs.h>
ead91d4b 18#include <linux/smp.h>
110c1e1f 19#include <linux/irq.h>
eef8f871
TG
20
21#include <asm/apic.h>
79f12614 22#include <asm/pci-direct.h>
734c4c67 23#include <asm/io.h>
96597fd2 24#include <asm/paravirt.h>
eef8f871 25#include <asm/setup.h>
96597fd2 26
ead91d4b
SF
27#define TOPOLOGY_REGISTER_OFFSET 0x10
28
70511134 29#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
96597fd2
GC
30/*
31 * Interrupt control on vSMPowered systems:
32 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
33 * and vice versa.
34 */
35
36static unsigned long vsmp_save_fl(void)
37{
38 unsigned long flags = native_save_fl();
39
40 if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
41 flags &= ~X86_EFLAGS_IF;
42 return flags;
43}
ecb93d1c 44PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
96597fd2
GC
45
46static void vsmp_restore_fl(unsigned long flags)
47{
48 if (flags & X86_EFLAGS_IF)
49 flags &= ~X86_EFLAGS_AC;
50 else
51 flags |= X86_EFLAGS_AC;
52 native_restore_fl(flags);
53}
ecb93d1c 54PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
96597fd2
GC
55
56static void vsmp_irq_disable(void)
57{
58 unsigned long flags = native_save_fl();
59
60 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
61}
ecb93d1c 62PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
96597fd2
GC
63
64static void vsmp_irq_enable(void)
65{
66 unsigned long flags = native_save_fl();
67
68 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
69}
ecb93d1c 70PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
96597fd2 71
05e12e1c 72static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
96597fd2
GC
73 unsigned long addr, unsigned len)
74{
75 switch (type) {
76 case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
77 case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
78 case PARAVIRT_PATCH(pv_irq_ops.save_fl):
79 case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
80 return paravirt_patch_default(type, clobbers, ibuf, addr, len);
81 default:
82 return native_patch(type, clobbers, ibuf, addr, len);
83 }
84
85}
79f12614 86
aa7d8e25 87static void __init set_vsmp_pv_ops(void)
79f12614 88{
9352f569 89 void __iomem *address;
2785c8d0 90 unsigned int cap, ctl, cfg;
79f12614 91
79f12614 92 /* set vSMP magic bits to indicate vSMP capable kernel */
2785c8d0
GC
93 cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
94 address = early_ioremap(cfg, 8);
79f12614
RT
95 cap = readl(address);
96 ctl = readl(address + 4);
ed4aed98
TG
97 printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
98 cap, ctl);
110c1e1f
RT
99
100 /* If possible, let the vSMP foundation route the interrupt optimally */
101#ifdef CONFIG_SMP
102 if (cap & ctl & BIT(8)) {
103 ctl &= ~BIT(8);
104 no_irq_affinity = 1;
105 }
106#endif
107
79f12614 108 if (cap & ctl & (1 << 4)) {
9f6d8552 109 /* Setup irq ops and turn on vSMP IRQ fastpath handling */
ecb93d1c
JF
110 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
111 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
112 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
113 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
9f6d8552 114 pv_init_ops.patch = vsmp_patch;
79f12614 115 ctl &= ~(1 << 4);
79f12614 116 }
110c1e1f
RT
117 writel(ctl, address + 4);
118 ctl = readl(address + 4);
119 pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
79f12614 120
2785c8d0 121 early_iounmap(address, 8);
aa7d8e25
RT
122}
#else
/* Without CONFIG_PCI + CONFIG_PARAVIRT there is nothing to negotiate. */
static void __init set_vsmp_pv_ops(void)
{
}
#endif
128
70511134 129#ifdef CONFIG_PCI
e5699a82 130static int is_vsmp = -1;
aa7d8e25 131
e5699a82 132static void __init detect_vsmp_box(void)
aa7d8e25 133{
e5699a82 134 is_vsmp = 0;
aa7d8e25 135
aa7d8e25 136 if (!early_pci_allowed())
e5699a82 137 return;
aa7d8e25 138
e5699a82 139 /* Check if we are running on a ScaleMP vSMPowered box */
6542fe80
IM
140 if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
141 (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
e5699a82
RT
142 is_vsmp = 1;
143}
aa7d8e25 144
e5699a82
RT
145int is_vsmp_box(void)
146{
147 if (is_vsmp != -1)
148 return is_vsmp;
149 else {
150 WARN_ON_ONCE(1);
151 return 0;
152 }
aa7d8e25 153}
aa7d8e25 154
70511134
RT
#else
/*
 * !CONFIG_PCI: early PCI config access is unavailable, so the vSMP
 * control device cannot be probed — report "not a vSMP box".
 */
static void __init detect_vsmp_box(void)
{
}
int is_vsmp_box(void)
{
	return 0;
}
#endif
ead91d4b
SF
164
/*
 * Cap the number of CPUs to those on the first board when the kernel
 * was not built with CONFIG_X86_VSMP, using the vSMP Foundation's
 * topology register.
 */
static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
	void __iomem *address;
	unsigned int cfg, topology, node_shift, maxcpus;

	/*
	 * CONFIG_X86_VSMP is not configured, so limit the number CPUs to the
	 * ones present in the first board, unless explicitly overridden by
	 * setup_max_cpus
	 */
	if (setup_max_cpus != NR_CPUS)
		return;

	/* Read the vSMP Foundation topology register */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
	if (WARN_ON(!address))
		return;

	topology = readl(address);
	/* Bits 18:16 select the width of the per-board CPU-count field;
	 * presumably a hardware-defined encoding — see vSMP docs. */
	node_shift = (topology >> 16) & 0x7;
	if (!node_shift)
		/* The value 0 should be decoded as 8 */
		node_shift = 8;
	/* Low node_shift bits hold (CPUs on first board - 1) */
	maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

	pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
		maxcpus);
	setup_max_cpus = maxcpus;
	early_iounmap(address, 4);
#endif
}
198
7db971b2
IY
/*
 * Physical package id for vSMP: derived from the running CPU's hardware
 * APIC id; the initial_apic_id argument is deliberately ignored.
 */
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	int apicid = hard_smp_processor_id();

	return apicid >> index_msb;
}
203
110c1e1f
RT
/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Allow the vector to be serviced by every CPU in the system. */
	cpumask_setall(retmask);
}
212
7db971b2
IY
/* Install vSMP-specific APIC callbacks after the APIC driver is chosen. */
static void vsmp_apic_post_init(void)
{
	/* need to update phys_pkg_id */
	apic->phys_pkg_id = apicid_phys_pkg_id;
	apic->vector_allocation_domain = fill_vector_allocation_domain;
}
219
aa7d8e25
RT
220void __init vsmp_init(void)
221{
e5699a82 222 detect_vsmp_box();
aa7d8e25
RT
223 if (!is_vsmp_box())
224 return;
225
7db971b2
IY
226 x86_platform.apic_post_init = vsmp_apic_post_init;
227
ead91d4b
SF
228 vsmp_cap_cpus();
229
aa7d8e25 230 set_vsmp_pv_ops();
a2beab31 231 return;
79f12614 232}
This page took 0.595016 seconds and 5 git commands to generate.