/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/kdebug.h>

extern void die(const char *,struct pt_regs *,long);

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
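/*
 * Registration syncs the vmalloc area into every page table up front
 * (vmalloc_sync_all) so that a notifier callback -- for instance a
 * kprobes handler living in module (vmalloc) space -- can never itself
 * take a vmalloc fault and recurse into the fault handler.
 */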
int register_page_fault_notifier(struct notifier_block *nb)
{
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, const char *str,
			struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.str = str,
		.err = err,
		.trapnr = trap,
		.signr = sig
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, const char *str,
			struct pt_regs *regs, long err, int trap, int sig)
{
	return NOTIFY_DONE;
}
#endif


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;

	if (yes) {
		oops_in_progress = 1;
		return;
	}
#ifdef CONFIG_VT
	unblank_screen();
#endif
	oops_in_progress = 0;
	/*
	 * OK, the message is on the console.  Now we call printk()
	 * without oops_in_progress set so that printk will give klogd
	 * a poke.  Hold onto your hats...
	 */
	console_loglevel = 15;		/* NMI oopser may have shut the console up */
	printk(" ");
	console_loglevel = loglevel_save;
}

/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long eip = regs->eip;
	unsigned seg = regs->xcs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

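	/*
	 * In vm86 mode the CPU forms linear addresses the real-mode way:
	 * (CS << 4) + IP, with both wrapping at 64K.  For example CS=0xb800,
	 * IP=0x0010 gives linear address 0xb8010.
	 */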
	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->eflags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (eip & 0xffff);
	}

	/* The standard kernel/user address space limit. */
	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
		return eip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
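	/*
	 * LAR loads the descriptor's access rights into bits 8-15 of
	 * seg_ar, so the mask 0x9800 tests present (0x8000), non-system
	 * descriptor (0x1000) and executable (0x0800) all at once.
	 */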
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
	if ((~seg_ar & 0x9800) || eip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned eip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		down(&current->mm->context.sem);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((unsigned long *)desc);

	if (seg & (1<<2)) {
		up(&current->mm->context.sem);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return eip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned long instr = get_segment_eip (regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

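	/*
	 * An x86 instruction is at most 15 bytes long, so decoding at
	 * most 15 bytes starting at the faulting EIP is guaranteed to
	 * cover any legal run of prefixes plus the opcode itself.
	 */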
	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > limit)
			break;
		if (__get_user(opcode, (unsigned char __user *) instr))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > limit)
				break;
			if (__get_user(opcode, (unsigned char __user *) instr))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
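
/*
 * Worked example of the decode above: "prefetchnta (%eax)" encodes as
 * 0F 18 00.  With a segment override, e.g. 3E 0F 18 00, the 0x3E prefix
 * keeps scan_more set (0x3E & 7 == 6), the 0x0F byte selects the
 * two-byte opcode case, and the following 0x18 marks it as a prefetch.
 */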

static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
	unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

fastcall void do_invalid_op(struct pt_regs *, unsigned long);

#ifdef CONFIG_X86_PAE
static void dump_fault_path(unsigned long address)
{
	unsigned long *p, page;
	unsigned long mfn;

	page = read_cr3();
	p = (unsigned long *)__va(page);
	p += (address >> 30) * 2;
	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
	if (p[0] & 1) {
		mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
		page = mfn_to_pfn(mfn) << PAGE_SHIFT;
		p = (unsigned long *)__va(page);
		address &= 0x3fffffff;
		p += (address >> 21) * 2;
		printk(KERN_ALERT "%08lx -> *pmd = %08lx:%08lx\n",
		       page, p[1], p[0]);
		mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
#ifdef CONFIG_HIGHPTE
		if (mfn_to_pfn(mfn) >= highstart_pfn)
			return;
#endif
		if (p[0] & 1) {
			page = mfn_to_pfn(mfn) << PAGE_SHIFT;
			p = (unsigned long *) __va(page);
			address &= 0x001fffff;
			p += (address >> 12) * 2;
			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
			       page, p[1], p[0]);
		}
	}
}
#else
static void dump_fault_path(unsigned long address)
{
	unsigned long page;

	page = read_cr3();
	page = ((unsigned long *) __va(page))[address >> 22];
	if (oops_may_print())
		printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
		       machine_to_phys(page));
	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
#ifdef CONFIG_HIGHPTE
	if ((page >> PAGE_SHIFT) >= highstart_pfn)
		return;
#endif
	if ((page & 1) && oops_may_print()) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = machine_to_phys(page);
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
		       machine_to_phys(page));
	}
}
#endif

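/*
 * Xen may update kernel mappings (e.g. R/O -> R/W when a page stops
 * being used as a page table) without an immediate TLB flush on every
 * CPU, so a stale fault can be delivered after the mapping has already
 * become valid.  If the current kernel page tables fully permit the
 * access, the fault is spurious and can be ignored.
 */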
static int spurious_fault(struct pt_regs *regs,
			  unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & 0x0c)
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;
	if ((error_code & 0x02) && !pte_write(*pte))
		return 0;
#ifdef CONFIG_X86_PAE
	if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
		return 0;
#endif

	return 1;
}

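/*
 * Copy the kernel pmd entry covering <address> from the reference page
 * table (init_mm.pgd) into the page table rooted at <pgd>.  Returns the
 * reference pmd, or NULL if the entry is not present in init_mm either.
 */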
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd))
#ifndef CONFIG_XEN
		set_pmd(pmd, *pmd_k);
#else
		/*
		 * When running on Xen we must launder *pmd_k through
		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
		 */
		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
#endif
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
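/*
 * For example, error_code == 6 (bits 1 and 2) is a user-mode write to a
 * not-present page -- the common demand-allocation/copy-on-write path --
 * while error_code == 0x14 (bits 2 and 4) is a user-mode instruction
 * fetch from a not-present page.
 */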
fastcall void __kprobes do_page_fault(struct pt_regs *regs,
				      unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	int write, si_code;

	/* get the address */
	address = read_cr2();

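	/*
	 * Under Xen the kernel runs in ring 1, so the hardware-supplied
	 * "user mode" bit (bit 2) cannot be trusted.  Recompute it from
	 * the saved CS: RPL 0/1 means kernel, RPL 2/3 (bit 1 of the
	 * selector set) means user; vm86 mode is always user.
	 */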
	/* Set the "privileged fault" bit to something sane. */
	error_code &= ~4;
	error_code |= (regs->xcs & 2) << 1;
	if (regs->eflags & X86_EFLAGS_VM)
		error_code |= 4;

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
#ifdef CONFIG_XEN
		/* Faults in hypervisor area can never be patched up. */
		if (address >= hypervisor_virt_start)
			goto bad_area_nosemaphore;
#endif
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		/* Can take a spurious fault if mapping changes R/O -> R/W. */
		if (spurious_fault(regs, address, error_code))
			return;
		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
				      SIGSEGV) == NOTIFY_STOP)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
			      SIGSEGV) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->eip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %esp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %esp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
#ifdef TEST_VERIFY_AREA
			if (regs->cs == GET_KERNEL_CS())
				printk("WP fault at %08lx\n", regs->eip);
#endif
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
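	/*
	 * The workaround maps the IDT read-only, so delivering the
	 * invalid-opcode exception for the locked cmpxchg8b sequence
	 * faults on the IDT access instead of hanging the CPU.  A fault
	 * landing on entry 6 (#UD) of the IDT is therefore rerouted to
	 * do_invalid_op().
	 */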
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (oops_may_print()) {
#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n",address);
		printk(KERN_ALERT " printing eip:\n");
		printk("%08lx\n", regs->eip);
	}
	dump_fault_path(address);
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
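	/* The init process may not be killed here: yield so that other
	   tasks can free memory, then retry the fault. */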
	if (tsk->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

#if !HAVE_SHARED_KERNEL_PMD
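/*
 * With a shared kernel pmd every page table already sees new vmalloc
 * mappings, so this sync pass is only needed when kernel pmds are
 * per-process.  Note the walk below relies on <address> wrapping past
 * 4GB (back below TASK_SIZE) to terminate the loop.
 */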
void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}
#endif
|---|