/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include "assigned-dev.h"

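/*
 * Without interrupt remapping, the MSI messages an assigned device writes
 * are not validated by the platform, so a malicious guest could use the
 * device to inject interrupts the host did not intend.  Device assignment
 * is therefore refused on such platforms unless the administrator opts in
 * via this parameter.
 */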
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

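/*
 * Pin @npages guest pages starting at @gfn by taking a reference on each
 * backing host page.  Returns the host pfn of the first page, or an error
 * pfn if the first translation fails.  The pfns of the remaining pages are
 * deliberately discarded: only the references matter here, and the caller
 * picks a range that never spans more than one host page of the chosen
 * size, so pfn..pfn+npages-1 already describes the whole range.
 */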
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long npages)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + npages;
        gfn    += 1;

        if (is_error_noslot_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}

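/*
 * Drop the references taken by kvm_pin_pages() on @npages host pages
 * starting at @pfn.
 */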
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

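/*
 * Map a whole memslot into the IOMMU domain.  The slot is walked gfn by
 * gfn; for each not-yet-mapped gfn, the largest page size that fits the
 * host backing, the slot boundary and the gfn/hva alignment is chosen,
 * the pages are pinned, and the range is mapped.  On failure everything
 * mapped so far is torn down again.
 */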
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ;
        if (!(slot->flags & KVM_MEM_READONLY))
                flags |= IOMMU_WRITE;
        if (!kvm->arch.iommu_noncoherent)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /* Make sure hva is aligned to the page size we want to map */
                while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;

                cond_resched();
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
}

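/*
 * Map every memslot into the IOMMU domain.  The memslot list is walked
 * under SRCU; mapping stops at the first failure and the error is
 * returned, leaving the caller to unmap whatever was mapped so far.
 */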
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_register_noncoherent_dma(kvm);

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}

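/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If cache
 * coherency for DMA differs from what the existing mappings assume,
 * every memslot is remapped first so that the IOMMU_CACHE flag stays
 * consistent with the hardware.
 */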
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r;
        bool noncoherent;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
                return r;
        }

        noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

        /* Check if need to update IOMMU page table for guest memory */
        if (noncoherent != kvm->arch.iommu_noncoherent) {
                kvm_iommu_unmap_memslots(kvm);
                kvm->arch.iommu_noncoherent = noncoherent;
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        kvm_arch_start_assignment(kvm);
        pci_set_dev_assigned(pdev);

        dev_info(&pdev->dev, "kvm assign device\n");

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

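/*
 * Detach a previously assigned PCI device from the VM's IOMMU domain
 * and drop the assignment bookkeeping.
 */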
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pci_clear_dev_assigned(pdev);
        kvm_arch_end_assignment(kvm);

        dev_info(&pdev->dev, "kvm deassign device\n");

        return 0;
}

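/*
 * Set up IOMMU mappings for the whole guest: allocate an IOMMU domain,
 * refuse if the platform lacks interrupt remapping (unless overridden
 * by allow_unsafe_assigned_interrupts), and map all memslots into the
 * new domain.  Called from the legacy device-assignment path (see
 * assigned-dev.h) when userspace requests IOMMU-backed assignment.
 */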
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

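/*
 * Unmap and unpin @npages guest pages starting at @base_gfn.  Note that
 * iommu_unmap() is called with PAGE_SIZE but may tear down a larger
 * (super)page mapping; the returned size says how many 4k pages were
 * actually unmapped and therefore must be unpinned.
 */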
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;

                cond_resched();
        }
}

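/*
 * Unmap a whole memslot from the IOMMU domain.
 */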
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

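/*
 * Unmap every memslot from the IOMMU domain, walking the memslot list
 * under SRCU, and unregister noncoherent DMA if it was registered.
 */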
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_unregister_noncoherent_dma(kvm);

        return 0;
}

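/*
 * Tear down all IOMMU mappings for the guest and free the IOMMU domain.
 * Counterpart of kvm_iommu_map_guest().
 */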
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        kvm->arch.iommu_noncoherent = false;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}