#ifndef _I386_KEXEC_H
#define _I386_KEXEC_H

/*
 * Indices into the page list used by the kexec relocation code.
 * Each page appears twice: once as a physical address (PA_*) and
 * once as a kernel virtual address (VA_*).
 */
#define PA_CONTROL_PAGE 0
#define VA_CONTROL_PAGE 1
#define PA_PGD 2
#define VA_PGD 3
#define PA_PTE_0 4
#define VA_PTE_0 5
#define PA_PTE_1 6
#define VA_PTE_1 7
#ifdef CONFIG_X86_PAE
/* PAE adds a middle page-table level, so two PMD pages are needed too. */
#define PA_PMD_0 8
#define VA_PMD_0 9
#define PA_PMD_1 10
#define VA_PMD_1 11
#define PAGES_NR 12
#else
#define PAGES_NR 8
#endif
---|
21 | |
---|
22 | #ifndef __ASSEMBLY__ |
---|
23 | |
---|
24 | #include <asm/fixmap.h> |
---|
25 | #include <asm/ptrace.h> |
---|
26 | #include <asm/string.h> |
---|
27 | |
---|
/*
 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
 * I.e. Maximum page that is mapped directly into kernel memory,
 * and kmap is not required.
 *
 * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
 * calculation for the amount of memory directly mappable into the
 * kernel memory space.
 */

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

/* One page for the control (trampoline) code */
#define KEXEC_CONTROL_CODE_SIZE 4096

/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)

/* Upper bound on the size of a single crash-time ELF note */
#define MAX_NOTE_BYTES 1024
---|
54 | |
---|
55 | /* CPU does not save ss and esp on stack if execution is already |
---|
56 | * running in kernel mode at the time of NMI occurrence. This code |
---|
57 | * fixes it. |
---|
58 | */ |
---|
/* CPU does not save ss and esp on stack if execution is already
 * running in kernel mode at the time of NMI occurrence. This code
 * fixes it.
 *
 * The address of the saved esp slot in the old frame is itself the
 * kernel stack pointer at the time of the trap, so it is substituted
 * for esp; ss is read directly from the CPU below.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
	memcpy(newregs, oldregs, sizeof(*newregs));
	/* &oldregs->esp == top of the exception frame == esp at trap time */
	newregs->esp = (unsigned long)&(oldregs->esp);
	/* Zero %eax first so the 16-bit %ss read is zero-extended into xss. */
	__asm__ __volatile__(
			"xorl %%eax, %%eax\n\t"
			"movw %%ss, %%ax\n\t"
			:"=a"(newregs->xss));
}
---|
69 | |
---|
70 | /* |
---|
71 | * This function is responsible for capturing register states if coming |
---|
72 | * via panic otherwise just fix up the ss and esp if coming via kernel |
---|
73 | * mode exception. |
---|
74 | */ |
---|
/*
 * This function is responsible for capturing register states if coming
 * via panic otherwise just fix up the ss and esp if coming via kernel
 * mode exception.
 *
 * In the panic path (oldregs == NULL) each register is snapshotted with
 * its own inline asm.  NOTE(review): %eax is captured before it is
 * reused as the transfer register for the segment reads below, and the
 * values reflect this function's own frame — the snapshot is
 * best-effort, not an exact trap frame.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		crash_fixup_ss_esp(newregs, oldregs);
	else {
		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
		/* Segment selectors are moved out through %ax ("=a"). */
		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));

		/* eip is approximated by the current instruction address. */
		newregs->eip = (unsigned long)current_text_addr();
	}
}
---|
/*
 * Jump into the relocation trampoline that switches to the new kernel;
 * declared noreturn (NORET_TYPE/ATTRIB_NORET).  Presumably implemented
 * in assembly (relocate_kernel.S) — confirm against the build.
 */
asmlinkage NORET_TYPE void
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae) ATTRIB_NORET;
---|
103 | |
---|
104 | |
---|
/* Under Xen we need to work with machine addresses. These macros give the
 * machine address of a certain page to the generic kexec code instead of
 * the pseudo physical address which would be given by the default macros.
 */

#ifdef CONFIG_XEN
/* Tell the generic kexec code to use the overrides below. */
#define KEXEC_ARCH_HAS_PAGE_MACROS
/* pfn <-> mfn translation wraps the default pfn/page conversions. */
#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
#define kexec_virt_to_phys(addr) virt_to_machine(addr)
#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
#endif

#endif /* __ASSEMBLY__ */

#endif /* _I386_KEXEC_H */
---|