source: trunk/packages/xen-common/xen-common/xen/arch/x86/hvm/vioapic.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 17 years ago

Add xen and xen-common

File size: 13.9 KB
/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Ported to xen by using virtual IRQ line.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

#if defined(__ia64__)
#define opt_hvm_debug_level opt_vmx_debug_level
#endif

static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);

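/*
 * Indirect register read: return the contents of the register currently
 * selected by IOREGSEL -- the version register, the APIC/arbitration ID,
 * or one 32-bit half of a 64-bit redirection-table entry (an odd
 * register number selects the high dword).
 */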
static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
                                           unsigned long addr,
                                           unsigned long length)
{
    unsigned long result = 0;

    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        result = ((((VIOAPIC_NUM_PINS-1) & 0xff) << 16)
                  | (VIOAPIC_VERSION_ID & 0xff));
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
    case VIOAPIC_REG_ARB_ID:
        result = ((vioapic->id & 0xf) << 24);
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
        uint64_t redir_content;

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "apic_mem_readl:undefined ioregsel %x\n",
                     vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[redir_index].bits;
        result = (vioapic->ioregsel & 0x1)?
            (redir_content >> 32) & 0xffffffff :
            redir_content & 0xffffffff;
        break;
    }
    }

    return result;
}

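/*
 * MMIO read handler: IOREGSEL is read directly; a read of the window
 * register is forwarded to whichever register IOREGSEL selects.
 */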
static unsigned long vioapic_read(struct vcpu *v,
                                  unsigned long addr,
                                  unsigned long length)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_read addr %lx\n", addr);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        result = vioapic->ioregsel;
        break;

    case VIOAPIC_REG_WINDOW:
        result = vioapic_read_indirect(vioapic, addr, length);
        break;

    default:
        result = 0;
        break;
    }

    return result;
}

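/*
 * Update one 32-bit half of redirection entry 'idx' under the domain's
 * IRQ lock.  Delivery Status and Remote IRR are read-only to the guest.
 * If the entry ends up unmasked, level-triggered, not already pending
 * and its GSI line is asserted, set Remote IRR and deliver at once.
 */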
static void vioapic_write_redirent(
    struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val)
{
    struct domain *d = vioapic_domain(vioapic);
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    union vioapic_redir_entry *pent, ent;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pent = &vioapic->redirtbl[idx];
    ent  = *pent;

    if ( top_word )
    {
        /* Contains only the dest_id. */
        ent.bits = (uint32_t)ent.bits | ((uint64_t)val << 32);
    }
    else
    {
        /* Remote IRR and Delivery Status are read-only. */
        ent.bits = ((ent.bits >> 32) << 32) | val;
        ent.fields.delivery_status = 0;
        ent.fields.remote_irr = pent->fields.remote_irr;
    }

    *pent = ent;

    if ( (ent.fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
         !ent.fields.mask &&
         !ent.fields.remote_irr &&
         hvm_irq->gsi_assert_count[idx] )
    {
        pent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, idx);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

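/*
 * Indirect register write: dispatch a write of the window register to
 * the register selected by IOREGSEL.  The version and arbitration-ID
 * registers are not writable; everything else maps onto the
 * redirection table.
 */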
static void vioapic_write_indirect(
    struct hvm_hw_vioapic *vioapic, unsigned long addr,
    unsigned long length, unsigned long val)
{
    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        /* Writes are ignored. */
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
        vioapic->id = (val >> 24) & 0xf;
        break;

    case VIOAPIC_REG_ARB_ID:
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_write_indirect "
                    "change redir index %x val %lx\n",
                    redir_index, val);

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "vioapic_write_indirect "
                     "error register %x\n", vioapic->ioregsel);
            break;
        }

        vioapic_write_redirent(
            vioapic, redir_index, vioapic->ioregsel&1, val);
        break;
    }
    }
}

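/*
 * MMIO write handler: writes to IOREGSEL select a register, writes to
 * the window register go to the selected register.  On IOSAPIC builds
 * the memory-mapped EOI register is handled here as well.
 */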
static void vioapic_write(struct vcpu *v,
                          unsigned long addr,
                          unsigned long length,
                          unsigned long val)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        vioapic->ioregsel = val;
        break;

    case VIOAPIC_REG_WINDOW:
        vioapic_write_indirect(vioapic, addr, length, val);
        break;

#if VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_EOI:
        vioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }
}

static int vioapic_range(struct vcpu *v, unsigned long addr)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);

    return ((addr >= vioapic->base_address &&
             (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
}

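/*
 * Hook the vIOAPIC into the HVM MMIO dispatcher: vioapic_range() claims
 * accesses inside the IOAPIC's MMIO window, which are then routed to
 * the read/write handlers above.
 */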
struct hvm_mmio_handler vioapic_mmio_handler = {
    .check_handler = vioapic_range,
    .read_handler = vioapic_read,
    .write_handler = vioapic_write
};

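/*
 * Inject 'vector' into a single target vLAPIC.  Only Fixed and
 * Lowest Priority delivery modes are supported; the target VCPU is
 * kicked when vlapic_set_irq() reports a newly pending interrupt.
 */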
static void ioapic_inj_irq(
    struct hvm_hw_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
                "irq %d trig %d delivery mode %d\n",
                vector, trig_mode, delivery_mode);

    switch ( delivery_mode )
    {
    case dest_Fixed:
    case dest_LowestPrio:
        if ( vlapic_set_irq(target, vector, trig_mode) )
            vcpu_kick(vlapic_vcpu(target));
        break;
    default:
        gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
        break;
    }
}

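/*
 * Translate a destination ID and destination mode into a bitmap of
 * candidate VCPUs.  Physical mode matches the LAPIC ID (0xFF is
 * broadcast); logical mode matches each vLAPIC's logical destination.
 */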
static uint32_t ioapic_get_delivery_bitmask(
    struct hvm_hw_vioapic *vioapic, uint16_t dest, uint8_t dest_mode)
{
    uint32_t mask = 0;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
                "dest %d dest_mode %d\n", dest, dest_mode);

    if ( dest_mode == 0 ) /* Physical mode. */
    {
        if ( dest == 0xFF ) /* Broadcast. */
        {
            for_each_vcpu ( vioapic_domain(vioapic), v )
                mask |= 1 << v->vcpu_id;
            goto out;
        }

        for_each_vcpu ( vioapic_domain(vioapic), v )
        {
            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
            {
                mask = 1 << v->vcpu_id;
                break;
            }
        }
    }
    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
    {
        for_each_vcpu ( vioapic_domain(vioapic), v )
            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
                mask |= 1 << v->vcpu_id;
    }

 out:
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask mask %x\n",
                mask);
    return mask;
}

static inline int pit_channel0_enabled(void)
{
    PITState *pit = &current->domain->arch.hvm_domain.pl_time.vpit;
    struct periodic_time *pt = &pit->pt[0];
    return pt->enabled;
}

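/*
 * Deliver the interrupt programmed into redirection entry 'irq' to its
 * destination set.  With IRQ0_SPECIAL_ROUTING, the PIT interrupt is
 * steered to VCPU0 only.  Called with the domain's IRQ lock held.
 */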
static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
{
    uint16_t dest = vioapic->redirtbl[irq].fields.dest_id;
    uint8_t dest_mode = vioapic->redirtbl[irq].fields.dest_mode;
    uint8_t delivery_mode = vioapic->redirtbl[irq].fields.delivery_mode;
    uint8_t vector = vioapic->redirtbl[irq].fields.vector;
    uint8_t trig_mode = vioapic->redirtbl[irq].fields.trig_mode;
    uint32_t deliver_bitmask;
    struct vlapic *target;
    struct vcpu *v;

    ASSERT(spin_is_locked(&vioapic_domain(vioapic)->arch.hvm_domain.irq_lock));

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x\n",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    deliver_bitmask = ioapic_get_delivery_bitmask(vioapic, dest, dest_mode);
    if ( !deliver_bitmask )
    {
        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
                    "no target on destination\n");
        return;
    }

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Force round-robin to pick VCPU 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            v = vioapic_domain(vioapic)->vcpu[0];
            target = v ? vcpu_vlapic(v) : NULL;
        }
        else
#endif
            target = apic_round_robin(vioapic_domain(vioapic),
                                      vector, deliver_bitmask);
        if ( target != NULL )
        {
            ioapic_inj_irq(vioapic, target, vector, trig_mode, delivery_mode);
        }
        else
        {
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "mask=%x vector=%x delivery_mode=%x\n",
                        deliver_bitmask, vector, dest_LowestPrio);
        }
        break;
    }

    case dest_Fixed:
    case dest_ExtINT:
    {
        uint8_t bit;
        for ( bit = 0; deliver_bitmask != 0; bit++ )
        {
            if ( !(deliver_bitmask & (1 << bit)) )
                continue;
            deliver_bitmask &= ~(1 << bit);
#ifdef IRQ0_SPECIAL_ROUTING
            /* Do not deliver timer interrupts to VCPU != 0 */
            if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
                v = vioapic_domain(vioapic)->vcpu[0];
            else
#endif
                v = vioapic_domain(vioapic)->vcpu[bit];
            if ( v != NULL )
            {
                target = vcpu_vlapic(v);
                ioapic_inj_irq(vioapic, target, vector,
                               trig_mode, delivery_mode);
            }
        }
        break;
    }

    case dest_SMI:
    case dest_NMI:
    case dest_INIT:
    case dest__reserved_2:
    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}

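/*
 * A GSI line has gone high.  Masked pins are ignored; edge-triggered
 * pins deliver immediately; level-triggered pins deliver only if
 * Remote IRR is not already set, and set it before delivering.
 */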
void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    union vioapic_redir_entry *ent;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_irq_positive_edge irq %x", irq);

    ASSERT(irq < VIOAPIC_NUM_PINS);
    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

    ent = &vioapic->redirtbl[irq];
    if ( ent->fields.mask )
        return;

    if ( ent->fields.trig_mode == VIOAPIC_EDGE_TRIG )
    {
        vioapic_deliver(vioapic, irq);
    }
    else if ( !ent->fields.remote_irr )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, irq);
    }
}

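/* Map an EOI'd vector back to the first pin programmed with that vector. */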
static int get_eoi_gsi(struct hvm_hw_vioapic *vioapic, int vector)
{
    int i;

    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
        if ( vioapic->redirtbl[i].fields.vector == vector )
            return i;

    return -1;
}

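/*
 * EOI notification from the vLAPIC.  Clear Remote IRR on the matching
 * pin and, for a level-triggered pin that is unmasked and whose line is
 * still asserted, re-assert Remote IRR and deliver the interrupt again.
 */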
void vioapic_update_EOI(struct domain *d, int vector)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    union vioapic_redir_entry *ent;
    int gsi;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( (gsi = get_eoi_gsi(vioapic, vector)) == -1 )
    {
        gdprintk(XENLOG_WARNING, "Can't find redir item for %d EOI\n", vector);
        goto out;
    }

    ent = &vioapic->redirtbl[gsi];

    ent->fields.remote_irr = 0;
    if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
         !ent->fields.mask &&
         hvm_irq->gsi_assert_count[gsi] )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, gsi);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

#ifdef HVM_DEBUG_SUSPEND
static void ioapic_info(struct hvm_hw_vioapic *s)
{
    int i;
    printk("*****ioapic state:*****\n");
    printk("ioapic 0x%x.\n", s->ioregsel);
    printk("ioapic 0x%x.\n", s->id);
    printk("ioapic 0x%lx.\n", s->base_address);
    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
        printk("ioapic redirtbl[%d]:0x%"PRIx64"\n", i, s->redirtbl[i].bits);
    }
}
#else
static void ioapic_info(struct hvm_hw_vioapic *s)
{
}
#endif

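/*
 * Save/restore of the vIOAPIC for HVM save records and migration: the
 * whole hvm_hw_vioapic structure is written and read as one entry.
 */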
static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vioapic *s = domain_vioapic(d);
    ioapic_info(s);

    /* Save IO-APIC state. */
    return ( hvm_save_entry(IOAPIC, 0, h, s) );
}

static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vioapic *s = domain_vioapic(d);

    /* Restore IO-APIC state. */
    if ( hvm_load_entry(IOAPIC, h, s) != 0 )
        return -EINVAL;

    ioapic_info(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);

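/*
 * Reset the vIOAPIC for a new domain: all redirection entries start
 * masked and the MMIO window sits at the default base address.
 */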
void vioapic_init(struct domain *d)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    int i;

    memset(vioapic, 0, sizeof(*vioapic));
    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
        vioapic->redirtbl[i].fields.mask = 1;
    vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
}