#ifndef _X86_64_KEXEC_H
#define _X86_64_KEXEC_H

#define PA_CONTROL_PAGE 0
#define VA_CONTROL_PAGE 1
#define PA_PGD 2
#define VA_PGD 3
#define PA_PUD_0 4
#define VA_PUD_0 5
#define PA_PMD_0 6
#define VA_PMD_0 7
#define PA_PTE_0 8
#define VA_PTE_0 9
#define PA_PUD_1 10
#define VA_PUD_1 11
#define PA_PMD_1 12
#define VA_PMD_1 13
#define PA_PTE_1 14
#define VA_PTE_1 15
#define PA_TABLE_PAGE 16
#define PAGES_NR 17

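/*
 * The PA_ and VA_ indices above name slots in the page_list array that
 * machine_kexec() hands to relocate_kernel(). A rough, illustrative
 * sketch of how a caller might fill it (variable names are hypothetical;
 * the authoritative code lives in machine_kexec.c):
 *
 *	unsigned long page_list[PAGES_NR];
 *
 *	page_list[PA_CONTROL_PAGE] = __pa(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_PGD]          = __pa(kexec_pgd);
 *	page_list[VA_PGD]          = (unsigned long)kexec_pgd;
 *	...
 *	relocate_kernel((unsigned long)image->head,
 *			(unsigned long)page_list, image->start);
 */
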
#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory,
 * so that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can use for the control pages */
#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)

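/*
 * Note: 0xFFFFFFFFFFUL is (1UL << 40) - 1, i.e. the top of the 40-bit
 * physical address space mentioned above.
 */
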
/* Allocate one page for the pdp and the second for the code */
#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL)
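/*
 * That is two 4 KiB pages, i.e. an 8 KiB (order-1) control area: per the
 * comment above, one page for the pdp and one for the relocation code.
 */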

/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_X86_64

#define MAX_NOTE_BYTES 1024

/*
 * Save the registers of the cpu on which the panic occurred in
 * crash_kexec so that a valid sp is saved. The registers of the other
 * cpus will be saved in machine_crash_shutdown while shooting them down.
 */

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		memcpy(newregs, oldregs, sizeof(*newregs));
	else {
		/* No saved registers were passed in, so snapshot this
		 * cpu's registers directly. */
		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
		__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
		__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
		__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
		__asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
		__asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
		__asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
		__asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
		__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
		__asm__ __volatile__("movl %%ss, %%eax;" : "=a"(newregs->ss));
		__asm__ __volatile__("movl %%cs, %%eax;" : "=a"(newregs->cs));
		__asm__ __volatile__("pushfq; popq %0" : "=m"(newregs->eflags));

		newregs->rip = (unsigned long)current_text_addr();
	}
}
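
/*
 * Illustrative only: the expected caller is the generic crash_kexec()
 * path, roughly along these lines (not the verbatim generic code):
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */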

NORET_TYPE void
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address) ATTRIB_NORET;

/* Under Xen we need to work with machine addresses. These macros give the
 * machine address of a certain page to the generic kexec code instead of
 * the pseudo-physical address which would be given by the default macros.
 */

#ifdef CONFIG_XEN
#define KEXEC_ARCH_HAS_PAGE_MACROS
#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
#define kexec_virt_to_phys(addr) virt_to_machine(addr)
#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
#endif
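
/*
 * When KEXEC_ARCH_HAS_PAGE_MACROS is not defined, the generic kexec code
 * presumably falls back to the plain pseudo-physical variants, along the
 * lines of:
 *
 *	#define kexec_page_to_pfn(page)   page_to_pfn(page)
 *	#define kexec_pfn_to_page(pfn)    pfn_to_page(pfn)
 *	#define kexec_virt_to_phys(addr)  virt_to_phys(addr)
 *	#define kexec_phys_to_virt(addr)  phys_to_virt(addr)
 */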

#endif /* __ASSEMBLY__ */

#endif /* _X86_64_KEXEC_H */