source: trunk/packages/xen-common/xen-common/xen/arch/powerpc/mm.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 17 years ago

Add xen and xen-common

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 *          Ryan Harper <ryanh@us.ibm.com>
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/shadow.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/platform.h>
#include <asm/string.h>
#include <public/arch-powerpc.h>

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
  printk("DOM%u: (file=mm.c, line=%d) " _f "\n",            \
         current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* Frame table and its size in pages. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;

/* Machine-to-phys mapping used by all domains. */
unsigned long *machine_phys_mapping;

void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

/* Array of PFNs, indexed by MFN. */
void __init init_machine_to_phys_table(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(unsigned long));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for machine phys mapping table\n");

    machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

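/* Give guest domain 'd' a reference to a Xen-heap page, pinned as either
 * read-only or writable, and put it on the domain's xenpage_list. */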
void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly)
{
    if ( page_get_owner(page) == d )
        return;

    /* This causes us to leak pages in the domain and results in
     * zombie domains; I think we are missing a piece. Until we find
     * it we disable the following code. */
    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);

    spin_lock(&d->page_alloc_lock);

    /* The incremented type count pins as writable or read-only. */
    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    page->u.inuse.type_info |= PGT_validated | 1;

    page_set_owner(page, d);
    wmb(); /* install valid domain ptr before updating refcnt. */
    ASSERT(page->count_info == 0);

    /* Only add to the allocation list if the domain isn't dying. */
    if ( !d->is_dying )
    {
        page->count_info |= PGC_allocated | 1;
        if ( unlikely(d->xenheap_pages++ == 0) )
            get_knownalive_domain(d);
        list_add_tail(&page->list, &d->xenpage_list);
    }

    spin_unlock(&d->page_alloc_lock);
}

void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly)
{
    unimplemented();
}

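/* Foreign mappings are tracked in a per-domain array, indexed by the PFN
 * offset above the foreign map boundary (1UL << cpu_foreign_map_order()). */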
static ulong foreign_to_mfn(struct domain *d, ulong pfn)
{

    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);

    return d->arch.foreign_mfns[pfn];
}

static int set_foreign(struct domain *d, ulong pfn, ulong mfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);
    d->arch.foreign_mfns[pfn] = mfn;

    return 0;
}

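/* Grant mappings are recorded via set_foreign() in the foreign map; only
 * Dom0 is permitted to create or destroy them. */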
static int create_grant_va_mapping(
    unsigned long va, unsigned long frame, struct vcpu *v)
{
    if (v->domain->domain_id != 0) {
        printk("only Dom0 can map a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(v->domain, va >> PAGE_SHIFT, frame);
    return GNTST_okay;
}

static int destroy_grant_va_mapping(
    unsigned long addr, unsigned long frame, struct domain *d)
{
    if (d->domain_id != 0) {
        printk("only Dom0 can unmap a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(d, addr >> PAGE_SHIFT, ~0UL);
    return GNTST_okay;
}

int create_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    if (flags & GNTMAP_application_map) {
        printk("%s: GNTMAP_application_map not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    return create_grant_va_mapping(addr, frame, current);
}

int destroy_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    /* may have to force the remove here */
    return destroy_grant_va_mapping(addr, frame, current->domain);
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}

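/* Type reference counting: put_page_type()/get_page_type() update the
 * PGT_count_mask field of type_info with a cmpxchg loop, so no lock is
 * needed; dropping the last reference records a TLB flush timestamp. */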
void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) || page_get_owner(page)->is_dying);

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}


int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This
                 * may be unnecessary (e.g., page was GDT/LDT) but those
                 * circumstances should be very rare.
                 */
                cpumask_t mask =
                    page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incr(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        else if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}

long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    printk("%s: no PPC specific memory ops\n", __func__);
    return -ENOSYS;
}

extern void copy_page(void *dp, void *sp)
{
    if (on_systemsim()) {
        systemsim_memcpy(dp, sp, PAGE_SIZE);
    } else {
        memcpy(dp, sp, PAGE_SIZE);
    }
}

/* Allocate (nrpages - rma_nrpages) more memory for domain in proper size. */
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
    struct page_info *pg;
    ulong mfn;
    ulong gpfn = rma_nrpages; /* starting PFN at end of RMA */
    uint ext_order;
    uint ext_nrpages;
    uint total_nrpages;
    int i;

    ext_order = cpu_extent_order();
    ext_nrpages = 1 << ext_order;

    total_nrpages = rma_nrpages;

    /* We only allocate in extent-sized chunks, so if nrpages is not a
     * multiple of the extent size you get more than you asked for. */
    while (total_nrpages < nrpages) {
        pg = alloc_domheap_pages(d, ext_order, 0);
        if (pg == NULL)
            return total_nrpages;

        /* Build p2m mapping for newly allocated extent. */
        mfn = page_to_mfn(pg);
        for (i = 0; i < (1 << ext_order); i++)
            guest_physmap_add_page(d, gpfn + i, mfn + i);

        /* Bump starting PFN by extent size pages. */
        gpfn += ext_nrpages;

        total_nrpages += ext_nrpages;
    }

    return total_nrpages;
}

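/* Allocate the domain's RMA (real mode area): a naturally aligned,
 * physically contiguous block whose pages are marked PGC_page_RMA and
 * entered in the p2m, and whose last page holds shared_info. */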
int allocate_rma(struct domain *d, unsigned int order)
{
    struct vcpu *v;
    ulong rma_base;
    ulong rma_sz;
    ulong mfn;
    int i;

    if (d->arch.rma_page)
        return -EINVAL;

    d->arch.rma_page = alloc_domheap_pages(d, order, 0);
    if (d->arch.rma_page == NULL) {
        gdprintk(XENLOG_INFO, "Could not allocate order=%d RMA for domain %u\n",
                order, d->domain_id);
        return -ENOMEM;
    }
    d->arch.rma_order = order;

    rma_base = page_to_maddr(d->arch.rma_page);
    rma_sz = rma_size(d->arch.rma_order);

    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */

    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
           d->domain_id, rma_base, rma_sz);

    mfn = page_to_mfn(d->arch.rma_page);

    for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
        d->arch.rma_page[i].count_info |= PGC_page_RMA;
        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));

        /* Set up p2m mapping for RMA. */
        guest_physmap_add_page(d, i, mfn+i);
    }

    /* shared_info uses last page of RMA */
    d->shared_info = (shared_info_t *) (rma_base + rma_sz - PAGE_SIZE);

    /* if there are already running vcpus, adjust v->vcpu_info */
    /* XXX untested */
    for_each_vcpu(d, v) {
        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
    }

    return 0;
}

void free_rma_check(struct page_info *page)
{
    if (test_bit(_PGC_page_RMA, &page->count_info)) {
        if (!page_get_owner(page)->is_dying) {
            panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
        } else {
            clear_bit(_PGC_page_RMA, &page->count_info);
        }
    }
}

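/* Translate a guest PFN to an MFN, classifying it as logical (p2m),
 * foreign, grant-table, or I/O space; returns INVALID_MFN (and sets *type
 * to PFN_TYPE_NONE) if the PFN cannot be resolved. */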
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
    ulong mfn = INVALID_MFN;
    int t = PFN_TYPE_NONE;
    ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();

    /* quick tests first */
    if (pfn & foreign_map_pfn) {
        t = PFN_TYPE_FOREIGN;
        mfn = foreign_to_mfn(d, pfn);
    } else if (pfn >= max_page && pfn <
               (max_page + nr_grant_frames(d->grant_table))) {
        /* XXX access d->grant_table->nr_grant_frames without lock.
         * Currently on powerpc dynamic expanding grant table is
         * inhibited by setting max_nr_grant_frames = INITIAL_NR_GRANT_FRAMES
         * so that this access is safe.
         */
        /* It's a grant table access. */
        t = PFN_TYPE_GNTTAB;
        mfn = gnttab_shared_mfn(d, d->grant_table, (pfn - max_page));
    } else if (d->is_privileged && platform_io_mfn(pfn)) {
        t = PFN_TYPE_IO;
        mfn = pfn;
    } else {
        if (pfn < d->arch.p2m_entries) {
            t = PFN_TYPE_LOGICAL;
            mfn = d->arch.p2m[pfn];
        }
#ifdef DEBUG
        if (t != PFN_TYPE_NONE && d->is_dying &&
            page_get_owner(mfn_to_page(mfn)) != d) {
            printk("%s: page type: %d owner Dom[%d]:%p expected Dom[%d]:%p\n",
                   __func__, t,
                   page_get_owner(mfn_to_page(mfn))->domain_id,
                   page_get_owner(mfn_to_page(mfn)),
                   d->domain_id, d);
            BUG();
        }
#endif
    }

    if (t == PFN_TYPE_NONE) {
        /* This hack allows dom0 to map all memory, necessary to
         * initialize domU state. */
        if (d->is_privileged && mfn_valid(pfn)) {
            struct page_info *pg;

            /* page better be allocated to some domain but not the caller */
            pg = mfn_to_page(pfn);
            if (!(pg->count_info & PGC_allocated))
                panic("Foreign page: 0x%lx is not owned by any domain\n",
                      pfn);
            if (page_get_owner(pg) == d)
                panic("Foreign page: 0x%lx is owned by this domain\n",
                      pfn);

            t = PFN_TYPE_FOREIGN;
            mfn = pfn;
        }
    }

    if (mfn == INVALID_MFN) {
        printk("%s: Dom[%d] pfn 0x%lx is not a valid page\n",
               __func__, d->domain_id, pfn);
    }

    if (type)
        *type = t;

    return mfn;
}

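/* Reverse translation: map an MFN owned by domain 'd' back to a guest
 * frame number, handling grant-table and I/O frames specially before
 * falling back to the machine-to-phys table. */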
unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
{
    struct page_info *pg = mfn_to_page(mfn);
    ulong gnttab_mfn;

    /* is this our mfn? */
    if (page_get_owner(pg) != d)
        return INVALID_M2P_ENTRY;

    /* XXX access d->grant_table->nr_grant_frames without lock.
     * Currently on powerpc dynamic expanding grant table is
     * inhibited by setting max_nr_grant_frames = INITIAL_NR_GRANT_FRAMES
     * so that this access is safe.
     */
    /* grant? */
    gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
    if (mfn >= gnttab_mfn && mfn <
        (gnttab_mfn + nr_grant_frames(d->grant_table)))
        return max_page + (mfn - gnttab_mfn);

    /* IO? */
    if (d->is_privileged && platform_io_mfn(mfn))
        return mfn;

    /* check m2p table */
    return get_gpfn_from_mfn(mfn);
}

/* NB: caller holds d->page_alloc lock, sets d->max_pages = new_max */
int guest_physmap_max_mem_pages(struct domain *d, unsigned long new_max_pages)
{
    u32 *p2m_array = NULL;
    u32 *p2m_old = NULL;
    ulong i;

    /* XXX We probably could, but right now we don't shrink the p2m array.
     * NB: d->max_pages >= d->arch.p2m_entries */
    if (new_max_pages < d->max_pages) {
        printk("Can't shrink DOM%d max memory pages\n", d->domain_id);
        return -EINVAL;
    }

    /* Allocate one u32 per page. */
    p2m_array = xmalloc_array(u32, new_max_pages);
    if (p2m_array == NULL)
        return -ENOMEM;

    /* Copy old mappings into new array. */
    if (d->arch.p2m != NULL) {
        /* XXX This could take a long time; we should use a continuation. */
        memcpy(p2m_array, d->arch.p2m, d->arch.p2m_entries * sizeof(u32));
        p2m_old = d->arch.p2m;
    }

    /* Mark new mfns as invalid. */
    for (i = d->arch.p2m_entries; i < new_max_pages; i++)
        p2m_array[i] = INVALID_MFN;

    /* Set new p2m pointer and size. */
    d->arch.p2m = p2m_array;
    d->arch.p2m_entries = new_max_pages;

    /* Free old p2m array if present. */
    if (p2m_old)
        xfree(p2m_old);

    return 0;
}

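/* Enter a PFN-to-MFN mapping in the domain's p2m array and record the
 * reverse mapping in the machine-to-phys table. */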
void guest_physmap_add_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    if (page_get_owner(mfn_to_page(mfn)) != d) {
        printk("Won't map foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
        return;
    }

    /* Check that pfn is within guest table. */
    if (gpfn >= d->arch.p2m_entries) {
        printk("Won't map invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
        return;
    }

    /* Warn if there is an existing mapping. */
    /* XXX: probably shouldn't let this happen, but
       current interface doesn't throw errors.  =( */
    if (d->arch.p2m[gpfn] != INVALID_MFN)
        printk("Ack! PFN aliased. PFN%lx, old MFN=%x, new MFN=%lx\n",
                gpfn, d->arch.p2m[gpfn], mfn);

    /* PFN and MFN ok, map in p2m table. */
    d->arch.p2m[gpfn] = mfn;

    /* Map in m2p table. */
    set_gpfn_from_mfn(mfn, gpfn);
}

void guest_physmap_remove_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    if (page_get_owner(mfn_to_page(mfn)) != d) {
        printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
        return;
    }

    /* check that pfn is within guest table */
    if (gpfn >= d->arch.p2m_entries) {
        printk("Won't unmap invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
        return;
    }

    /* PFN and MFN ok, unmap from p2m table. */
    d->arch.p2m[gpfn] = INVALID_MFN;

    /* Unmap from m2p table. */
    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
}

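/* Nothing to do: this port keeps no shadow state for guest pages, so the
 * common-code hook is an empty stub. */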
void shadow_drop_references(
    struct domain *d, struct page_info *page)
{
}