arch/powerpc/platforms/pseries/lpar.c
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
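
/*
 * A sketch of how the callers below pack an H_BULK_REMOVE request: the
 * eight parameter dwords passed to plpar_hcall9() hold up to four
 * (control, match-value) pairs,
 *
 *	param[pix]   = HBR_REQUEST | HBR_AVPN | slot;
 *	param[pix+1] = hpte_encode_avpn(vpn, psize, ssize);
 *
 * with a partial batch terminated by an HBR_END control dword.
 */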

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR calls this feature "SLB-Buffer" but firmware never reports
	 * it as such.  All SPLPARs support the SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register the dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
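		/*
		 * Mask bit 2 enables logging of time-slice preempt events
		 * only; the full bit assignments (cede/preempt/fault) are
		 * documented with the DTL_LOG_* defines in dtl.c.  That
		 * naming is an assumption from that file, not from here.
		 */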
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	/* Make pHyp happy */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~_PAGE_COHERENT;
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
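
/*
 * A sketch of how callers decode the value returned above: the low
 * three bits (rc & _PTEIDX_GROUP_IX, i.e. rc & 7) give the slot within
 * the PTE group, and bit 3 (rc & _PTEIDX_SECONDARY, i.e. rc & 0x8)
 * flags that the entry landed in the secondary hash bucket.
 */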

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}
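
/*
 * Note on the (0x1UL << 4) above: with H_ANDCOND the hypervisor removes
 * an entry only if ANDing this value with the first HPTE dword yields
 * zero, so passing HPTE_V_BOLTED (0x10) skips bolted entries without
 * having to read them back first.
 */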

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4, invalidating only valid entries that are
	 * not in the VRMA.  hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3: a batch of 12
 * HPTEs is four (control, match) pairs per H_BULK_REMOVE, i.e. three
 * hcalls per lock hold.  We also need to make sure that we avoid
 * bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
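
/*
 * hpte_slot_array, passed in from the transparent-hugepage code, carries
 * one byte per sub-page of the huge page, recording whether an HPTE was
 * inserted for that sub-page and which hash slot it used; hpte_valid()
 * and hpte_hash_index() (from the page-table headers) decode it below.
 */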
static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
					     unsigned char *hpte_slot_array,
					     unsigned long addr, int psize)
{
	int ssize = 0, i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

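/*
 * Booting with "bulk_remove=off" therefore makes the flush paths above
 * fall back to invalidating one HPTE at a time via H_REMOVE.
 */
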
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all	= pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

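/*
 * CMO (Cooperative Memory Overcommit) free-page hinting: arch_free_page()
 * below uses H_PAGE_INIT with H_PAGE_SET_UNUSED to tell the hypervisor
 * that a freed page's contents no longer matter, so the backing memory
 * can be reclaimed without paging it out first.
 */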
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);
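
/*
 * For reference, the retbuf[3] unpacking above treats the dword as:
 *
 *	bits 63..56: mem_weight
 *	bits 55..48: unallocated_mem_weight
 *	bits 47..0 : unallocated_entitlement
 *
 * This simply restates the shifts and masks in the code; the field
 * layout itself comes from the PAPR H_GET_MPP definition.
 */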

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}