/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2007 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
        int i;
        unsigned long flags;

        /* the vpn of i & 0xf is so we don't write identical TLB entries
         * into the same 4-way entry group.
         */
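        /* For each slot: select it through R_TLB_SELECT, give it the
         * reserved INVALID_PAGEID in R_TLB_HI and clear R_TLB_LO (valid,
         * global, kernel, we and pfn all zero) so it can never hit.
         */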

        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                              IO_STATE(R_TLB_LO, valid,  no ) |
                              IO_STATE(R_TLB_LO, kernel, no ) |
                              IO_STATE(R_TLB_LO, we,     no ) |
                              IO_FIELD(R_TLB_LO, pfn,    0  ) );
        }
        local_irq_restore(flags);
        D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
        int i;
        int page_id = mm->context.page_id;
        unsigned long flags;

        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        /* mark the TLB entries that match the page_id as invalid.
         * here we could also check the _PAGE_GLOBAL bit and NOT flush
         * global pages. is it worth the extra I/O?
         */
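        /* Each slot has to be selected through R_TLB_SELECT before its
         * R_TLB_HI can be examined, so walk all NUM_TLB_ENTRIES slots and
         * rewrite the ones whose page_id matches this context.
         */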

        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int page_id = mm->context.page_id;
        int i;
        unsigned long flags;

        D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        addr &= PAGE_MASK; /* perhaps not necessary */

        /* invalidate those TLB entries that match both the mm context
         * and the virtual address requested
         */
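        /* R_TLB_HI of the selected slot holds both the page_id and the vpn,
         * so a single read is enough to check the entry against this
         * context and the page-aligned address.
         */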

        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long tlb_hi;

                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                tlb_hi = *R_TLB_HI;
                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
                    (tlb_hi & PAGE_MASK) == addr) {
                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                    addr; /* same addr as before works. */

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
        int i;
        unsigned long flags;

        printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
                       i, *R_TLB_HI, *R_TLB_LO);
        }
        local_irq_restore(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
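/*
 * The mm starts out with NO_CONTEXT; a real page_id is assigned later by
 * get_mmu_context() when the mm is switched to (see switch_mm() below).
 */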

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context.page_id = NO_CONTEXT;
        return 0;
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        if (prev != next) {
                /* make sure we have a context */
                get_mmu_context(next);

                /* remember the pgd for the fault handlers.
                 * this is similar to the pgd register in some other CPUs.
                 * we need our own copy of it because current and active_mm
                 * might be invalid at points where we still need to
                 * dereference the pgd.
                 */

                per_cpu(current_pgd, smp_processor_id()) = next->pgd;

                /* switch context in the MMU */

                D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
                         next->context.page_id, next));

                *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
                                          page_id, next->context.page_id);
        }
}