1 | #ifdef CONFIG_COMPAT |
---|
2 | |
---|
3 | #include <xen/event.h> |
---|
4 | #include <xen/multicall.h> |
---|
5 | #include <compat/memory.h> |
---|
6 | #include <compat/xen.h> |
---|
7 | |
---|
/*
 * 32-bit-guest wrapper for HYPERVISOR_set_gdt: the compat frame list holds
 * 32-bit frame numbers, so widen each entry to unsigned long before handing
 * the array to the native set_gdt().
 */
int compat_set_gdt(XEN_GUEST_HANDLE(uint) frame_list, unsigned int entries)
{
    /* 512 descriptors (8 bytes each) fit in one 4k page. */
    unsigned int i, nr_pages = (entries + 511) / 512;
    /*
     * NOTE(review): assumes nr_pages can never exceed 16 given the
     * FIRST_RESERVED_GDT_ENTRY bound checked below -- confirm against the
     * architectural GDT layout.
     */
    unsigned long frames[16];
    long ret;

    /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    /* Validate the whole guest array range up front ... */
    if ( !guest_handle_okay(frame_list, nr_pages) )
        return -EFAULT;

    for ( i = 0; i < nr_pages; ++i )
    {
        unsigned int frame;

        /* ... so the cheaper unchecked copy can be used per element. */
        if ( __copy_from_guest(&frame, frame_list, 1) )
            return -EFAULT;
        frames[i] = frame;  /* zero-extend 32-bit frame number */
        guest_handle_add_offset(frame_list, 1);
    }

    LOCK_BIGLOCK(current->domain);

    /* On success flush local TLB so stale GDT mappings are dropped. */
    if ( (ret = set_gdt(current, frames, entries)) == 0 )
        local_flush_tlb();

    UNLOCK_BIGLOCK(current->domain);

    return ret;
}
---|
40 | |
---|
41 | int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi) |
---|
42 | { |
---|
43 | return do_update_descriptor(pa_lo | ((u64)pa_hi << 32), |
---|
44 | desc_lo | ((u64)desc_hi << 32)); |
---|
45 | } |
---|
46 | |
---|
/*
 * 32-bit-guest wrapper for the arch-specific memory_op hypercalls.
 *
 * For ops whose argument layout differs between compat and native ABIs, the
 * compat structure is copied in, translated into the per-vcpu argument
 * translation area (COMPAT_ARG_XLAT_VIRT_START), and the native handler is
 * invoked on the translated copy.  Returns 0 or a -errno value.
 */
int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( op )
    {
    case XENMEM_add_to_physmap:
    {
        struct compat_add_to_physmap cmp;
        /* Translated (native-layout) copy lives in the per-vcpu xlat area. */
        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_add_to_physmap(nat, &cmp);
        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

/* The embedded buffer handle needs a custom translation step. */
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

/* Reverse translation: only nr_entries changed; the handle is untouched. */
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        /* Report the compat (32-bit) machine-to-phys table geometry. */
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        /*
         * Walk the compat M2P region one L2 (2Mb/4Mb-level) mapping at a
         * time, reporting the backing MFN of each present mapping until
         * either the guest's buffer or the region is exhausted.
         */
        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START;
              (i != xmml.max_extents) && (v != RDWR_COMPAT_MPT_VIRT_END);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                break;
            /*
             * NOTE(review): l1_table_offset(v) is zero for 2Mb-aligned v;
             * presumably kept for generality -- confirm against the region's
             * alignment guarantees.
             */
            mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
        }

        /* Tell the guest how many extents were actually filled in. */
        xmml.nr_extents = i;
        if ( copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
---|
161 | |
---|
162 | int compat_update_va_mapping(unsigned int va, u32 lo, u32 hi, |
---|
163 | unsigned int flags) |
---|
164 | { |
---|
165 | return do_update_va_mapping(va, lo | ((u64)hi << 32), flags); |
---|
166 | } |
---|
167 | |
---|
168 | int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi, |
---|
169 | unsigned long flags, |
---|
170 | domid_t domid) |
---|
171 | { |
---|
172 | return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags, domid); |
---|
173 | } |
---|
174 | |
---|
/* Guest-handle type for arrays of compat-layout mmuext_op structures. */
DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);
---|
176 | |
---|
/*
 * 32-bit-guest wrapper for HYPERVISOR_mmuext_op.
 *
 * Translates batches of compat-layout mmuext_op structures into the per-vcpu
 * argument translation area and feeds them to the native do_mmuext_op(),
 * looping until all 'count' ops are processed.  Handles both preemption at
 * this level (continuation re-entering this wrapper) and preemption inside
 * do_mmuext_op() (continuation state rewritten back to compat layout).
 */
int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
                     unsigned int count,
                     XEN_GUEST_HANDLE(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;

    /* Separate the "this is a continuation" flag from the real op count. */
    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    /* Native-layout ops are built in the per-vcpu translation area. */
    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));

    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit;
        int err;

        /* Bail to a continuation before starting another batch. */
        if ( hypercall_preempt_check() )
        {
            rc = hypercall_create_continuation(
                __HYPERVISOR_mmuext_op, "hihi",
                cmp_uops, count | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
            break;
        }

        /* Batch size is bounded by the translation area's capacity. */
        limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

            /*
             * Select how arg1 must be translated.  Note the deliberate case
             * ordering: 'default' sits before MMUEXT_NEW_USER_BASEPTR so
             * that unknown commands translate arg1 as a linear address,
             * while NEW_USER_BASEPTR (unsupported for compat guests) sets
             * -EINVAL and then FALLS THROUGH to the no-arg1 group.
             */
            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fall through */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;  /* arg1 unused for these commands */
                break;
            }

            if ( rc )
                break;

            /* Select how arg2 must be translated. */
            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            default:
                arg2 = -1;  /* arg2 unused for these commands */
                break;
            }

/*
 * The compat vcpumask is a 32-bit bitmap behind a compat handle; widen it to
 * unsigned long and stash it at the tail of the translation area, shrinking
 * 'limit' so the op array can never grow into the stashed masks.
 */
#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        do \
        { \
            unsigned int vcpumask; \
            if ( i < --limit ) \
            { \
                (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
                if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
                    *(unsigned long *)(_d_)->arg2.vcpumask.p = vcpumask; \
                else \
                    rc = -EFAULT; \
            } \
        } while(0)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            /* 'i >= limit': no room was left for this op's vcpumask. */
            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }

        /* Process this batch; keep the PREEMPTED flag across batches. */
        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            /*
             * A positive return equal to the hypercall number means
             * do_mmuext_op() created a continuation referring to the native
             * op array; rewrite it to refer to the compat array instead.
             */
            if ( err == __HYPERVISOR_mmuext_op )
            {
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &this_cpu(mc_state);
                /* The continuation's count argument, from regs or mc state. */
                unsigned int arg1 = !test_bit(_MCSF_in_multicall, &mcs->flags)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1);      /* PREEMPTED flag must be set. */
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, count - left);
                BUG_ON(left + i < count);  /* can't have consumed past batch */
                guest_handle_add_offset(cmp_uops, (signed int)(count - left - i));
                left = 1;
                /* Swap the native ops handle for the compat one (arg 0). */
                BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
                BUG_ON(left != arg1);
                /* Account for ops of later batches not yet translated. */
                if (!test_bit(_MCSF_in_multicall, &mcs->flags))
                    regs->_ecx += count - i;
                else
                    mcs->compat_call.args[1] += count - i;
            }
            else
                BUG_ON(err > 0);  /* only -errno or a continuation expected */
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}
---|
326 | |
---|
327 | #endif /* CONFIG_COMPAT */ |
---|
328 | |
---|
329 | /* |
---|
330 | * Local variables: |
---|
331 | * mode: C |
---|
332 | * c-set-style: "BSD" |
---|
333 | * c-basic-offset: 4 |
---|
334 | * tab-width: 4 |
---|
335 | * indent-tabs-mode: nil |
---|
336 | * End: |
---|
337 | */ |
---|