source: trunk/packages/xen-3.1/xen-3.1/xen/arch/ia64/xen/mm_init.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 4.1 KB
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <xen/sched.h>
#include <asm/vhpt.h>
#include <asm/xenmca.h>
#include <asm/meminit.h>
#include <asm/page.h>

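/*
 * Per-CPU TLB information used by the MCA (machine check) handler; each
 * entry is keyed by the CPU's cr.lid value (see ia64_mmu_init() below).
 */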
struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];

extern void ia64_tlb_init (void);

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, impl_va_bits;
#if 0
        unsigned long pta;
#endif
        extern void __devinit tlb_init (void);
        int cpu;

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /* Pin mapping for percpu area into TLB */
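        /*
         * A translation register may only be inserted with interruption
         * collection disabled, so save the PSR and clear psr.ic first;
         * target 0x2 in ia64_itr() selects the data TLB.
         */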
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

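        /* Restore the saved PSR and serialize so the pinned mapping takes effect. */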
        ia64_set_psr(psr);
        ia64_srlz_i();

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))
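        /*
         * Worked example (illustrative numbers only): with 16KB pages
         * (PAGE_SHIFT = 14) and 8-byte PTEs (pte_bits = 3), the three-level
         * page table maps 3*(14-3) + 14 = 47 bits of address space, and with
         * impl_va_bits = 51 the VMLPT spans 51 - 14 + 3 = 40 bits (1TB).
         */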
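
        /*
         * impl_va_bits is the index of the lowest unimplemented virtual-address
         * bit; OR-ing in the region-number bits (61-63) caps the result at 61.
         */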
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

#ifdef XEN
        vhpt_init();
#endif
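        /*
         * The native PTA/VMLPT setup below is compiled out: under Xen the
         * PTA is expected to be programmed by vhpt_init() above.
         */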
#if 0
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        if (POW2(mapped_space_bits) >= pta)
                panic("mm/init: overlap between virtually mapped linear page table and "
                      "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
#endif
        ia64_tlb_init();

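        /*
         * Set the preferred page size for the huge-page region; the region
         * register's page-size field occupies bits 2-7, hence the << 2.
         */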
#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif

        cpu = smp_processor_id();

        /* mca handler uses cr.lid as key to pick the right entry */
        ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

        /* insert this percpu data information into our list for MCA recovery purposes */
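        /*
         * Xen records only the physical address of the per-cpu area; native
         * Linux additionally records a kernel pte and the per-cpu ptc.e
         * purge parameters for use by the physical-mode MCA handler.
         */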
#ifdef XEN
        ia64_mca_tlb_list[cpu].percpu_paddr = __pa(my_cpu_data);
#else
        ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
        /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
        ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
        ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
        ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
        ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
        ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}
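
/*
 * Xen's mem_init() keeps only the PCI DMA setup from the native ia64
 * version; the rest of the native initialization is omitted here.
 */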
void
mem_init (void)
{
#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif
}