#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>

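/* Under this Xen port, a full TLB flush always becomes a xen_tlb_flush(). */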
---|
#define __flush_tlb()		xen_tlb_flush()

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __flush_tlb_global()	xen_tlb_flush()

extern unsigned long pgkern_mask;

#define __flush_tlb_all()	__flush_tlb_global()

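/* Flush a single address via Xen's invlpg operation. */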
---|
#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)

---|
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or the full VM. For a range
 * flush we therefore always flush the full VM. It might be worth
 * checking whether a few INVLPGs in a row win for small ranges.
 */
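
/*
 * A minimal usage sketch (an added illustration, not part of the
 * original interface): flush a single page after editing one PTE, but
 * use the range interface when rewriting a whole region.  The function
 * name and the elided PTE-editing steps are hypothetical.
 */
#if 0
static void example_remap(struct vm_area_struct *vma, unsigned long addr,
			  unsigned long start, unsigned long end)
{
	/* ... modify the PTE backing 'addr' ... */
	flush_tlb_page(vma, addr);	/* single-page flush */

	/* ... rewrite every PTE in [start, end) ... */
	flush_tlb_range(vma, start, end); /* full-VM flush, per the note above */
}
#endif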
---|

#ifndef CONFIG_SMP

#define flush_tlb()		__flush_tlb()
#define flush_tlb_all()		__flush_tlb_all()
#define local_flush_tlb()	__flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb()	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

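/*
 * As noted above, x86-64 has no ranged flush primitive, so a range
 * flush simply degrades to flushing the whole mm.
 */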
---|
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
---|

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* When freeing page-table ranges, batch enough pages that an IPI is
   sent only about every 20MB (with 4k pages). The batch array costs
   about 42k of memory per CPU. */
#define ARCH_FREE_PTE_NR 5350
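/*
 * Quick check on those figures (an added note, assuming 4k pages and
 * 8-byte pointers): 5350 * 4096 bytes ~= 20.9MB freed per IPI, and
 * 5350 * 8 bytes ~= 42k of pointer storage per CPU.
 */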
---|

#endif

#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/*
	 * x86-64 does not keep any page-table caches in a software TLB.
	 * The CPUs do keep them in their hardware TLBs, but those are
	 * handled by the normal TLB flushing algorithms.
	 */
}

#endif /* _X8664_TLBFLUSH_H */