#ifndef _REGIONREG_H_
#define _REGIONREG_H_

#define XEN_DEFAULT_RID        7
#define IA64_MIN_IMPL_RID_MSB  17
#define _REGION_ID(x)          ({ia64_rr _v; _v.rrval = (long) (x); _v.rid;})
#define _REGION_PAGE_SIZE(x)   ({ia64_rr _v; _v.rrval = (long) (x); _v.ps;})
#define _REGION_HW_WALKER(x)   ({ia64_rr _v; _v.rrval = (long) (x); _v.ve;})
#define _MAKE_RR(r, sz, v)     ({ia64_rr _v; _v.rrval = 0; _v.rid = (r); _v.ps = (sz); _v.ve = (v); _v.rrval;})

typedef union ia64_rr {
        struct {
                unsigned long ve        :  1;  /* enable hw walker */
                unsigned long reserved0 :  1;  /* reserved */
                unsigned long ps        :  6;  /* log page size */
                unsigned long rid       : 24;  /* region id */
                unsigned long reserved1 : 32;  /* reserved */
        };
        unsigned long rrval;
} ia64_rr;

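/*
 * Illustrative only (not part of the original interface): how the
 * accessor macros above pack and unpack an rr value.  The constants
 * used here are made up for the example.
 *
 *      unsigned long rrval = _MAKE_RR(0x1234UL, 14, 1);
 *      // _REGION_ID(rrval)        == 0x1234  (rid field, bits 8..31)
 *      // _REGION_PAGE_SIZE(rrval) == 14      (log2 page size, 16KB)
 *      // _REGION_HW_WALKER(rrval) == 1       (VHPT walker enabled)
 */
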
//
// region register macros
//
#define RR_TO_VE(arg)   (((arg) >> 0) & 0x0000000000000001)
#define RR_VE(arg)      (((arg) & 0x0000000000000001) << 0)
#define RR_VE_MASK      0x0000000000000001L
#define RR_VE_SHIFT     0
#define RR_TO_PS(arg)   (((arg) >> 2) & 0x000000000000003f)
#define RR_PS(arg)      (((arg) & 0x000000000000003f) << 2)
#define RR_PS_MASK      0x00000000000000fcL
#define RR_PS_SHIFT     2
#define RR_TO_RID(arg)  (((arg) >> 8) & 0x0000000000ffffff)
#define RR_RID(arg)     (((arg) & 0x0000000000ffffff) << 8)
#define RR_RID_MASK     0x00000000ffffff00L
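
/*
 * These shift/mask macros mirror the ia64_rr bit layout above:
 * ve in bit 0, ps in bits 2..7, rid in bits 8..31.  For example
 * (illustrative only), RR_TO_RID(RR_RID(r)) == r for any 24-bit r,
 * and RR_RID_MASK == (0xffffffUL << 8).
 */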


int set_one_rr(unsigned long rr, unsigned long val);

// This function is purely for performance... apparently scrambling
// bits in the region id makes for better hashing, which means better
// use of the VHPT, which means better performance.
// Note that the only time a RID should be mangled is when it is stored in
// a region register; any time it is "viewable" outside of this module,
// it should be unmangled.

// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
// Must ensure these two remain consistent!
static inline unsigned long
vmMangleRID(unsigned long RIDVal)
{
        union bits64 { unsigned char bytes[4]; unsigned long uint; };

        union bits64 t;
        unsigned char tmp;

        t.uint = RIDVal;
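        /*
         * Swap bytes 1 and 3 of the low 32 bits (ia64 is little-endian).
         * If RIDVal is laid out as an ia64_rr value, those bytes fall
         * inside the 24-bit rid field (bits 8..31), so ve/ps in byte 0
         * and the upper half of the value are left untouched.
         */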
        tmp = t.bytes[1];
        t.bytes[1] = t.bytes[3];
        t.bytes[3] = tmp;

        return t.uint;
}

// Since vmMangleRID is symmetric, use it for unmangling also.
#define vmUnmangleRID(x)        vmMangleRID(x)
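
/*
 * Consequence of the byte swap above (illustrative only): the mangling
 * is an involution, i.e. vmUnmangleRID(vmMangleRID(x)) == x for any x.
 */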

extern void init_rid_allocator(void);

struct domain;

/* Allocate a RID range and metaphysical RIDs for domain d.
   If ridbits is 0, a default value is used instead. */
extern int allocate_rid_range(struct domain *d, unsigned long ridbits);
extern int deallocate_rid_range(struct domain *d);

struct vcpu;
extern void init_all_rr(struct vcpu *v);

extern int set_metaphysical_rr0(void);

extern void load_region_regs(struct vcpu *v);

#endif /* !_REGIONREG_H_ */