source: trunk/packages/xen-3.1/xen-3.1/tools/libxc/xc_core_ia64.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 9.8 KB
Line 
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
15 *
16 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
17 *                    VA Linux Systems Japan K.K.
18 *
19 */
20
21#include "xg_private.h"
22#include "xc_core.h"
23#include "xc_efi.h"
24#include "xc_dom.h"
25#include <inttypes.h>
26
27static int
28xc_memory_map_cmp(const void *lhs__, const void *rhs__)
29{
30    const struct xc_core_memory_map *lhs =
31        (const struct xc_core_memory_map *)lhs__;
32    const struct xc_core_memory_map *rhs =
33        (const struct xc_core_memory_map *)rhs__;
34
35    if (lhs->addr < rhs->addr)
36        return -1;
37    if (lhs->addr > rhs->addr)
38        return 1;
39
40    /* memory map overlap isn't allowed. complain */
41    DPRINTF("duplicated addresses are detected "
42            "(0x%" PRIx64 ", 0x%" PRIx64 "), "
43            "(0x%" PRIx64 ", 0x%" PRIx64 ")\n",
44            lhs->addr, lhs->size, rhs->addr, rhs->size);
45    return 0;
46}
47
48int
49xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
50{
51    /*
52     * on ia64, both paravirtualize domain and hvm domain are
53     * auto_translated_physmap mode
54     */
55    return 1;
56}
57
58/* see setup_guest() @ xc_linux_build.c */
59static int
60memory_map_get_old_domu(int xc_handle, xc_dominfo_t *info,
61                        shared_info_t *live_shinfo,
62                        xc_core_memory_map_t **mapp, unsigned int *nr_entries)
63{
64    xc_core_memory_map_t *map = NULL;
65
66    map = malloc(sizeof(*map));
67    if ( map == NULL )
68    {
69        PERROR("Could not allocate memory");
70        goto out;
71    }
72
73    map->addr = 0;
74    map->size = info->max_memkb * 1024;
75
76    *mapp = map;
77    *nr_entries = 1;
78    return 0;
79
80out:
81    if ( map != NULL )
82        free(map);
83    return -1;
84}
85
/* see setup_guest() @ xc_ia64_hvm_build.c */
/*
 * Build the compat memory map for an old HVM domain: the fixed
 * firmware/IO pages plus the guest RAM split around the VGA hole at
 * [VGA_IO_START, VGA_IO_END) and, if RAM reaches it, the MMIO hole at
 * MMIO_START (remainder relocated to 4GB).  On success stores a
 * malloc()ed, address-sorted array in *mapp (caller frees), sets
 * *nr_entries and returns 0; returns -1 on allocation failure.
 */
static int
memory_map_get_old_hvm(int xc_handle, xc_dominfo_t *info, 
                       shared_info_t *live_shinfo,
                       xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    /* fixed firmware/IO regions every HVM guest has; mirrors the layout
     * created by setup_guest() in xc_ia64_hvm_build.c */
    const xc_core_memory_map_t gfw_map[] = {
        {IO_PAGE_START, IO_PAGE_SIZE},
        {STORE_PAGE_START, STORE_PAGE_SIZE},
        {BUFFER_IO_PAGE_START, BUFFER_IO_PAGE_SIZE},
        {BUFFER_PIO_PAGE_START, BUFFER_PIO_PAGE_SIZE},
        {GFW_START, GFW_SIZE},
    };
    const unsigned int nr_gfw_map = sizeof(gfw_map)/sizeof(gfw_map[0]);
    xc_core_memory_map_t *map = NULL;
    unsigned int i;

#define VGA_IO_END      (VGA_IO_START + VGA_IO_SIZE)
    /* [0, VGA_IO_START) [VGA_IO_END, 3GB), [4GB, ...) + gfw_map */
    map = malloc((3 + nr_gfw_map) * sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory");
        goto out;
    }

    /* copy the fixed regions, then append RAM starting as one flat
     * region [0, max_memkb * 1024) which is split up below */
    for ( i = 0; i < nr_gfw_map; i++ )
        map[i] = gfw_map[i];
    map[i].addr = 0;
    map[i].size = info->max_memkb * 1024;
    i++;
    if ( map[i - 1].size < VGA_IO_END )
    {
        /* RAM ends inside/below the VGA hole: just clip it */
        map[i - 1].size = VGA_IO_START;
    }
    else
    {
        /* punch the VGA hole: keep [0, VGA_IO_START) and add
         * [VGA_IO_END, original end) as a new entry */
        map[i].addr = VGA_IO_END;
        map[i].size = map[i - 1].size - VGA_IO_END;
        map[i - 1].size = VGA_IO_START;
        i++;
        if ( map[i - 1].addr + map[i - 1].size > MMIO_START )
        {
            /* RAM spills past the MMIO hole at 3GB: clip the second
             * region at MMIO_START and relocate the overflow
             * (region end - MMIO_START bytes) to 4GB */
            map[i].addr = MMIO_START + 1 * MEM_G;
            map[i].size = map[i - 1].addr + map[i - 1].size - MMIO_START;
            map[i - 1].size = MMIO_START - map[i - 1].addr;
            i++;
        }
    }
    *mapp = map;
    *nr_entries = i;
    /* consumers expect the map sorted by start address */
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return 0;

out:
    if ( map != NULL )
        free(map);
    return -1;
}
145
146static int
147memory_map_get_old(int xc_handle, xc_dominfo_t *info, 
148                   shared_info_t *live_shinfo,
149                   xc_core_memory_map_t **mapp, unsigned int *nr_entries)
150{
151    if ( info->hvm )
152        return memory_map_get_old_hvm(xc_handle, info, live_shinfo,
153                                      mapp, nr_entries);
154    if ( live_shinfo == NULL )
155        return -1;
156    return memory_map_get_old_domu(xc_handle, info, live_shinfo,
157                                   mapp, nr_entries);
158}
159
/*
 * Retrieve the domain's memory map for core dumping.
 *
 * The preferred implementation (compiled out under '#ifdef notyet')
 * reads the EFI-style memory map the guest published via
 * arch.memmap_info_pfn and keeps only conventional WB RAM regions,
 * falling back to memory_map_get_old() when unavailable.  Currently
 * only the fallback runs.  Returns 0 on success, -1 on failure.
 */
int
xc_core_arch_memory_map_get(int xc_handle, xc_dominfo_t *info,
                            shared_info_t *live_shinfo,
                            xc_core_memory_map_t **mapp,
                            unsigned int *nr_entries)
{
#ifdef notyet
    int ret = -1;
    xen_ia64_memmap_info_t *memmap_info;
    xc_core_memory_map_t *map;
    char *start;
    char *end;
    char *p;
    efi_memory_desc_t *md;

    /* no memmap page published by the guest: use the compat path */
    if  ( live_shinfo == NULL || live_shinfo->arch.memmap_info_pfn == 0 )
        goto old;

    memmap_info = xc_map_foreign_range(xc_handle, info->domid,
                                       PAGE_SIZE, PROT_READ,
                                       live_shinfo->arch.memmap_info_pfn);
    if ( memmap_info == NULL )
    {
        PERROR("Could not map memmap info.");
        return -1;
    }
    /* sanity-check the guest-provided header before trusting it.
     * NOTE(review): sizeof(memmap_info) is the size of the POINTER, not
     * the struct; sizeof(*memmap_info) looks intended -- confirm before
     * enabling this path. */
    if ( memmap_info->efi_memdesc_size != sizeof(*md) ||
         (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
         memmap_info->efi_memmap_size > PAGE_SIZE - sizeof(memmap_info) ||
         memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION )
    {
        PERROR("unknown memmap header. defaulting to compat mode.");
        munmap(memmap_info, PAGE_SIZE);
        goto old;
    }

    *nr_entries = memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size;
    map = malloc(*nr_entries * sizeof(*md));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory for memmap.");
        /* NOTE(review): 'map' is uninitialized on this path yet the
         * out: label passes it to qsort() with *nr_entries != 0 --
         * undefined behaviour if this code is ever enabled; initialize
         * map = NULL and skip the qsort on failure. */
        goto out;
    }
    *mapp = map;

    /* copy out only usable RAM: conventional, write-back, non-empty */
    *nr_entries = 0;
    start = (char*)&memmap_info->memdesc;
    end = start + memmap_info->efi_memmap_size;
    for ( p = start; p < end; p += memmap_info->efi_memdesc_size )
    {
        md = (efi_memory_desc_t*)p;
        if ( md->type != EFI_CONVENTIONAL_MEMORY ||
             md->attribute != EFI_MEMORY_WB ||
             md->num_pages == 0 )
            continue;

        map[*nr_entries].addr = md->phys_addr;
        map[*nr_entries].size = md->num_pages << EFI_PAGE_SHIFT;
        (*nr_entries)++;
    }
    ret = 0;
out:
    munmap(memmap_info, PAGE_SIZE);
    /* dump writers expect the map sorted by start address */
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return ret;

old:
#endif
    return memory_map_get_old(xc_handle, info, live_shinfo, mapp, nr_entries);
}
230
231int
232xc_core_arch_map_p2m(int xc_handle, xc_dominfo_t *info,
233                     shared_info_t *live_shinfo, xen_pfn_t **live_p2m,
234                     unsigned long *pfnp)
235{
236    /*
237     * on ia64, both paravirtualize domain and hvm domain are
238     * auto_translated_physmap mode
239     */
240    errno = ENOSYS;
241    return -1;
242}
243
244void
245xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
246{
247    int i;
248
249    arch_ctxt->mapped_regs_size =
250        (XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE: XMAPPEDREGS_SIZE;
251    arch_ctxt->nr_vcpus = 0;
252    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
253        arch_ctxt->mapped_regs[i] = NULL;
254}
255
256void
257xc_core_arch_context_free(struct xc_core_arch_context* arch_ctxt)
258{
259    int i;
260    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
261        if ( arch_ctxt->mapped_regs[i] != NULL )
262            munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
263}
264
265int
266xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
267                         vcpu_guest_context_t* ctxt,
268                         int xc_handle, uint32_t domid)
269{
270    mapped_regs_t* mapped_regs;
271    if ( ctxt->privregs_pfn == INVALID_P2M_ENTRY )
272    {
273        PERROR("Could not get mmapped privregs gmfn");
274        errno = ENOENT;
275        return -1;
276    }
277    mapped_regs = xc_map_foreign_range(xc_handle, domid,
278                                       arch_ctxt->mapped_regs_size,
279                                       PROT_READ, ctxt->privregs_pfn);
280    if ( mapped_regs == NULL )
281    {
282        PERROR("Could not map mapped privregs");
283        return -1;
284    }
285    arch_ctxt->mapped_regs[arch_ctxt->nr_vcpus] = mapped_regs;
286    arch_ctxt->nr_vcpus++;
287    return 0;
288}
289
290int
291xc_core_arch_context_get_shdr(struct xc_core_arch_context *arch_ctxt, 
292                              struct xc_core_section_headers *sheaders,
293                              struct xc_core_strtab *strtab,
294                              uint64_t *filesz, uint64_t offset)
295{
296    int sts = -1;
297    Elf64_Shdr *shdr;
298
299    /* mmapped priv regs */
300    shdr = xc_core_shdr_get(sheaders);
301    if ( shdr == NULL )
302    {
303        PERROR("Could not get section header for .xen_ia64_mapped_regs");
304        return sts;
305    }
306    *filesz = arch_ctxt->mapped_regs_size * arch_ctxt->nr_vcpus;
307    sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_IA64_MAPPED_REGS,
308                           SHT_PROGBITS, offset, *filesz,
309                           __alignof__(*arch_ctxt->mapped_regs[0]),
310                           arch_ctxt->mapped_regs_size);
311    return sts;
312}
313
314int
315xc_core_arch_context_dump(struct xc_core_arch_context* arch_ctxt,
316                          void* args, dumpcore_rtn_t dump_rtn)
317{
318    int sts = 0;
319    int i;
320   
321    /* ia64 mapped_regs: .xen_ia64_mapped_regs */
322    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
323    {
324        sts = dump_rtn(args, (char*)arch_ctxt->mapped_regs[i],
325                       arch_ctxt->mapped_regs_size);
326        if ( sts != 0 )
327            break;
328    }
329    return sts;
330}
331
332/*
333 * Local variables:
334 * mode: C
335 * c-set-style: "BSD"
336 * c-basic-offset: 4
337 * tab-width: 4
338 * indent-tabs-mode: nil
339 * End:
340 */
Note: See TracBrowser for help on using the repository browser.