1 | #ifndef _ASM_IA64_IO_H |
---|
2 | #define _ASM_IA64_IO_H |
---|
3 | |
---|
4 | /* |
---|
5 | * This file contains the definitions for the emulated IO instructions |
---|
6 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same |
---|
7 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" |
---|
8 | * versions of the single-IO instructions (inb_p/inw_p/..). |
---|
9 | * |
---|
10 | * This file is not meant to be obfuscating: it's just complicated to |
---|
11 | * (a) handle it all in a way that makes gcc able to optimize it as |
---|
12 | * well as possible and (b) trying to avoid writing the same thing |
---|
13 | * over and over again with slight variations and possibly making a |
---|
14 | * mistake somewhere. |
---|
15 | * |
---|
16 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
---|
17 | * David Mosberger-Tang <davidm@hpl.hp.com> |
---|
18 | * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> |
---|
19 | * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> |
---|
20 | */ |
---|
21 | |
---|
22 | /* We don't use IO slowdowns on the ia64, but.. */ |
---|
23 | #define __SLOW_DOWN_IO do { } while (0) |
---|
24 | #define SLOW_DOWN_IO do { } while (0) |
---|
25 | |
---|
26 | #define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED) |
---|
27 | |
---|
28 | /* |
---|
29 | * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but |
---|
30 | * large machines may have multiple other I/O spaces so we can't place any a priori limit |
---|
31 | * on IO_SPACE_LIMIT. These additional spaces are described in ACPI. |
---|
32 | */ |
---|
33 | #define IO_SPACE_LIMIT 0xffffffffffffffffUL |
---|
34 | |
---|
35 | #define MAX_IO_SPACES_BITS 4 |
---|
36 | #define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS) |
---|
37 | #define IO_SPACE_BITS 24 |
---|
38 | #define IO_SPACE_SIZE (1UL << IO_SPACE_BITS) |
---|
39 | |
---|
40 | #define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS) |
---|
41 | #define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS) |
---|
42 | #define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1)) |
---|
43 | |
---|
44 | #define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff)) |
---|
45 | |
---|
/*
 * Describes one legacy I/O port space.  Port accesses are translated to
 * loads/stores at an offset from mmio_base; the offset is either the port
 * number itself (dense) or IO_SPACE_SPARSE_ENCODING(port) when 'sparse'
 * is non-zero.
 */
struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;			/* non-zero: use sparse port encoding */
};
---|
50 | |
---|
51 | extern struct io_space io_space[]; |
---|
52 | extern unsigned int num_io_spaces; |
---|
53 | |
---|
54 | # ifdef __KERNEL__ |
---|
55 | |
---|
56 | /* |
---|
57 | * All MMIO iomem cookies are in region 6; anything less is a PIO cookie: |
---|
58 | * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap) |
---|
59 | * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port) |
---|
60 | * |
---|
61 | * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch |
---|
62 | * code that uses bare port numbers without the prerequisite pci_iomap(). |
---|
63 | */ |
---|
64 | #define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS)) |
---|
65 | #define PIO_MASK (PIO_OFFSET - 1) |
---|
66 | #define PIO_RESERVED __IA64_UNCACHED_OFFSET |
---|
67 | #define HAVE_ARCH_PIO_SIZE |
---|
68 | |
---|
69 | #include <asm/hypervisor.h> |
---|
70 | #include <asm/intrinsics.h> |
---|
71 | #include <asm/machvec.h> |
---|
72 | #include <asm/page.h> |
---|
73 | #include <asm/privop.h> |
---|
74 | #include <asm/system.h> |
---|
75 | #include <asm-generic/iomap.h> |
---|
76 | |
---|
77 | /* |
---|
78 | * Change virtual addresses to physical addresses and vv. |
---|
79 | */ |
---|
80 | static inline unsigned long |
---|
81 | virt_to_phys (volatile void *address) |
---|
82 | { |
---|
83 | return (unsigned long) address - PAGE_OFFSET; |
---|
84 | } |
---|
85 | |
---|
86 | static inline void* |
---|
87 | phys_to_virt (unsigned long address) |
---|
88 | { |
---|
89 | return (void *) (address + PAGE_OFFSET); |
---|
90 | } |
---|
91 | |
---|
92 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
---|
93 | extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size); |
---|
94 | extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */ |
---|
95 | extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count); |
---|
96 | |
---|
97 | /* |
---|
98 | * The following two macros are deprecated and scheduled for removal. |
---|
99 | * Please use the PCI-DMA interface defined in <asm/pci.h> instead. |
---|
100 | */ |
---|
101 | #ifndef CONFIG_XEN |
---|
102 | #define bus_to_virt phys_to_virt |
---|
103 | #define virt_to_bus virt_to_phys |
---|
104 | #define page_to_bus page_to_phys |
---|
105 | #else |
---|
106 | #define bus_to_virt(bus) \ |
---|
107 | phys_to_virt(machine_to_phys_for_dma(bus)) |
---|
108 | #define virt_to_bus(virt) \ |
---|
109 | phys_to_machine_for_dma(virt_to_phys(virt)) |
---|
110 | #define page_to_bus(page) \ |
---|
111 | phys_to_machine_for_dma(page_to_pseudophys(page)) |
---|
112 | |
---|
113 | #define page_to_pseudophys(page) \ |
---|
114 | ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) |
---|
115 | |
---|
116 | /* |
---|
117 | * Drivers that use page_to_phys() for bus addresses are broken. |
---|
118 | * This includes: |
---|
119 | * drivers/ide/cris/ide-cris.c |
---|
120 | * drivers/scsi/dec_esp.c |
---|
121 | */ |
---|
122 | #define page_to_phys(page) (page_to_pseudophys(page)) |
---|
123 | #define bvec_to_bus(bv) (page_to_bus((bv)->bv_page) + \ |
---|
124 | (unsigned long) (bv)->bv_offset) |
---|
125 | #define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \ |
---|
126 | (unsigned long) bio_offset((bio))) |
---|
127 | #define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \ |
---|
128 | (unsigned long) (bv)->bv_offset) |
---|
129 | #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ |
---|
130 | (((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \ |
---|
131 | ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \ |
---|
132 | bvec_to_pseudophys((vec2)))) |
---|
133 | |
---|
134 | /* We will be supplying our own /dev/mem implementation */ |
---|
135 | #define ARCH_HAS_DEV_MEM |
---|
136 | #define ARCH_HAS_DEV_MEM_MMAP_MEM |
---|
137 | int xen_mmap_mem(struct file * file, struct vm_area_struct * vma); |
---|
138 | #endif /* CONFIG_XEN */ |
---|
139 | |
---|
# endif /* __KERNEL__ */
---|
141 | |
---|
142 | /* |
---|
143 | * Memory fence w/accept. This should never be used in code that is |
---|
144 | * not IA-64 specific. |
---|
145 | */ |
---|
146 | #define __ia64_mf_a() ia64_mfa() |
---|
147 | |
---|
148 | /** |
---|
149 | * ___ia64_mmiowb - I/O write barrier |
---|
150 | * |
---|
151 | * Ensure ordering of I/O space writes. This will make sure that writes |
---|
152 | * following the barrier will arrive after all previous writes. For most |
---|
153 | * ia64 platforms, this is a simple 'mf.a' instruction. |
---|
154 | * |
---|
155 | * See Documentation/DocBook/deviceiobook.tmpl for more information. |
---|
156 | */ |
---|
static inline void ___ia64_mmiowb(void)
{
	/* "mf.a": memory fence with acceptance -- prior uncached writes
	 * are accepted by the platform before later ones are issued. */
	ia64_mfa();
}
---|
161 | |
---|
162 | static inline void* |
---|
163 | __ia64_mk_io_addr (unsigned long port) |
---|
164 | { |
---|
165 | struct io_space *space; |
---|
166 | unsigned long offset; |
---|
167 | |
---|
168 | space = &io_space[IO_SPACE_NR(port)]; |
---|
169 | port = IO_SPACE_PORT(port); |
---|
170 | if (space->sparse) |
---|
171 | offset = IO_SPACE_SPARSE_ENCODING(port); |
---|
172 | else |
---|
173 | offset = port; |
---|
174 | |
---|
175 | return (void *) (space->mmio_base | offset); |
---|
176 | } |
---|
177 | |
---|
178 | #define __ia64_inb ___ia64_inb |
---|
179 | #define __ia64_inw ___ia64_inw |
---|
180 | #define __ia64_inl ___ia64_inl |
---|
181 | #define __ia64_outb ___ia64_outb |
---|
182 | #define __ia64_outw ___ia64_outw |
---|
183 | #define __ia64_outl ___ia64_outl |
---|
184 | #define __ia64_readb ___ia64_readb |
---|
185 | #define __ia64_readw ___ia64_readw |
---|
186 | #define __ia64_readl ___ia64_readl |
---|
187 | #define __ia64_readq ___ia64_readq |
---|
188 | #define __ia64_readb_relaxed ___ia64_readb |
---|
189 | #define __ia64_readw_relaxed ___ia64_readw |
---|
190 | #define __ia64_readl_relaxed ___ia64_readl |
---|
191 | #define __ia64_readq_relaxed ___ia64_readq |
---|
192 | #define __ia64_writeb ___ia64_writeb |
---|
193 | #define __ia64_writew ___ia64_writew |
---|
194 | #define __ia64_writel ___ia64_writel |
---|
195 | #define __ia64_writeq ___ia64_writeq |
---|
196 | #define __ia64_mmiowb ___ia64_mmiowb |
---|
197 | |
---|
198 | /* |
---|
199 | * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure |
---|
200 | * that the access has completed before executing other I/O accesses. Since we're doing |
---|
201 | * the accesses through an uncachable (UC) translation, the CPU will execute them in |
---|
202 | * program order. However, we still need to tell the compiler not to shuffle them around |
---|
203 | * during optimization, which is why we use "volatile" pointers. |
---|
204 | */ |
---|
205 | |
---|
/*
 * ___ia64_inb - default (machvec-overridable) 8-bit port input.
 * Translates the port to its MMIO address, does a volatile uncached
 * load, then "mf.a" so the access completes before later I/O accesses
 * (see the ordering comment block above).
 */
static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();	/* wait for acceptance before returning */
	return ret;
}
---|
216 | |
---|
/*
 * ___ia64_inw - default 16-bit port input; same translate/load/fence
 * sequence as ___ia64_inb() but with a 16-bit access.
 */
static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();	/* wait for acceptance before returning */
	return ret;
}
---|
227 | |
---|
/*
 * ___ia64_inl - default 32-bit port input; same translate/load/fence
 * sequence as ___ia64_inb() but with a 32-bit access.
 */
static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();	/* wait for acceptance before returning */
	return ret;
}
---|
238 | |
---|
/*
 * ___ia64_outb - default 8-bit port output: volatile store through the
 * uncached MMIO alias of the port, then "mf.a" so the write is accepted
 * before any subsequent I/O access.
 */
static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();	/* wait for acceptance of the write */
}
---|
247 | |
---|
/*
 * ___ia64_outw - default 16-bit port output; same store/fence sequence
 * as ___ia64_outb() but with a 16-bit access.
 */
static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();	/* wait for acceptance of the write */
}
---|
256 | |
---|
/*
 * ___ia64_outl - default 32-bit port output; same store/fence sequence
 * as ___ia64_outb() but with a 32-bit access.
 */
static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();	/* wait for acceptance of the write */
}
---|
265 | |
---|
/*
 * __insb - string version of inb: read 'count' bytes from 'port' into
 * the buffer at 'dst', one platform_inb() per byte.
 */
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *out = dst;

	for (; count != 0; --count)
		*out++ = platform_inb(port);
}
---|
274 | |
---|
/*
 * __insw - string version of inw: read 'count' 16-bit values from
 * 'port' into the buffer at 'dst'.
 */
static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *out = dst;

	for (; count != 0; --count)
		*out++ = platform_inw(port);
}
---|
283 | |
---|
/*
 * __insl - string version of inl: read 'count' 32-bit values from
 * 'port' into the buffer at 'dst'.
 */
static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *out = dst;

	for (; count != 0; --count)
		*out++ = platform_inl(port);
}
---|
292 | |
---|
/*
 * __outsb - string version of outb: write 'count' bytes from the buffer
 * at 'src' to 'port', one platform_outb() per byte.
 */
static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *in = src;

	for (; count != 0; --count)
		platform_outb(*in++, port);
}
---|
301 | |
---|
/*
 * __outsw - string version of outw: write 'count' 16-bit values from
 * the buffer at 'src' to 'port'.
 */
static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *in = src;

	for (; count != 0; --count)
		platform_outw(*in++, port);
}
---|
310 | |
---|
/*
 * __outsl - string version of outl: write 'count' 32-bit values from
 * the buffer at 'src' to 'port'.
 */
static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *in = src;

	for (; count != 0; --count)
		platform_outl(*in++, port);
}
---|
319 | |
---|
320 | /* |
---|
321 | * Unfortunately, some platforms are broken and do not follow the IA-64 architecture |
---|
322 | * specification regarding legacy I/O support. Thus, we have to make these operations |
---|
323 | * platform dependent... |
---|
324 | */ |
---|
325 | #define __inb platform_inb |
---|
326 | #define __inw platform_inw |
---|
327 | #define __inl platform_inl |
---|
328 | #define __outb platform_outb |
---|
329 | #define __outw platform_outw |
---|
330 | #define __outl platform_outl |
---|
331 | #define __mmiowb platform_mmiowb |
---|
332 | |
---|
333 | #define inb(p) __inb(p) |
---|
334 | #define inw(p) __inw(p) |
---|
335 | #define inl(p) __inl(p) |
---|
336 | #define insb(p,d,c) __insb(p,d,c) |
---|
337 | #define insw(p,d,c) __insw(p,d,c) |
---|
338 | #define insl(p,d,c) __insl(p,d,c) |
---|
339 | #define outb(v,p) __outb(v,p) |
---|
340 | #define outw(v,p) __outw(v,p) |
---|
341 | #define outl(v,p) __outl(v,p) |
---|
342 | #define outsb(p,s,c) __outsb(p,s,c) |
---|
343 | #define outsw(p,s,c) __outsw(p,s,c) |
---|
344 | #define outsl(p,s,c) __outsl(p,s,c) |
---|
345 | #define mmiowb() __mmiowb() |
---|
346 | |
---|
347 | /* |
---|
348 | * The address passed to these functions are ioremap()ped already. |
---|
349 | * |
---|
350 | * We need these to be machine vectors since some platforms don't provide |
---|
351 | * DMA coherence via PIO reads (PCI drivers and the spec imply that this is |
---|
352 | * a good idea). Writes are ok though for all existing ia64 platforms (and |
---|
353 | * hopefully it'll stay that way). |
---|
354 | */ |
---|
/*
 * ___ia64_readb - default MMIO byte read: a single volatile load
 * through the already-ioremap()ed cookie (__force drops the sparse
 * __iomem annotation for the dereference).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}
---|
360 | |
---|
/* ___ia64_readw - default MMIO 16-bit read: single volatile load. */
static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}
---|
366 | |
---|
/* ___ia64_readl - default MMIO 32-bit read: single volatile load. */
static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}
---|
372 | |
---|
/* ___ia64_readq - default MMIO 64-bit read: single volatile load. */
static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}
---|
378 | |
---|
/*
 * __writeb - MMIO byte write: a single volatile store through the
 * already-ioremap()ed cookie.  No fence here; ordering against other
 * writes is handled by mmiowb() where needed.
 */
static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}
---|
384 | |
---|
/* __writew - MMIO 16-bit write: single volatile store. */
static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}
---|
390 | |
---|
/* __writel - MMIO 32-bit write: single volatile store. */
static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}
---|
396 | |
---|
/* __writeq - MMIO 64-bit write: single volatile store. */
static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}
---|
402 | |
---|
403 | #define __readb platform_readb |
---|
404 | #define __readw platform_readw |
---|
405 | #define __readl platform_readl |
---|
406 | #define __readq platform_readq |
---|
407 | #define __readb_relaxed platform_readb_relaxed |
---|
408 | #define __readw_relaxed platform_readw_relaxed |
---|
409 | #define __readl_relaxed platform_readl_relaxed |
---|
410 | #define __readq_relaxed platform_readq_relaxed |
---|
411 | |
---|
412 | #define readb(a) __readb((a)) |
---|
413 | #define readw(a) __readw((a)) |
---|
414 | #define readl(a) __readl((a)) |
---|
415 | #define readq(a) __readq((a)) |
---|
416 | #define readb_relaxed(a) __readb_relaxed((a)) |
---|
417 | #define readw_relaxed(a) __readw_relaxed((a)) |
---|
418 | #define readl_relaxed(a) __readl_relaxed((a)) |
---|
419 | #define readq_relaxed(a) __readq_relaxed((a)) |
---|
420 | #define __raw_readb readb |
---|
421 | #define __raw_readw readw |
---|
422 | #define __raw_readl readl |
---|
423 | #define __raw_readq readq |
---|
424 | #define __raw_readb_relaxed readb_relaxed |
---|
425 | #define __raw_readw_relaxed readw_relaxed |
---|
426 | #define __raw_readl_relaxed readl_relaxed |
---|
427 | #define __raw_readq_relaxed readq_relaxed |
---|
428 | #define writeb(v,a) __writeb((v), (a)) |
---|
429 | #define writew(v,a) __writew((v), (a)) |
---|
430 | #define writel(v,a) __writel((v), (a)) |
---|
431 | #define writeq(v,a) __writeq((v), (a)) |
---|
432 | #define __raw_writeb writeb |
---|
433 | #define __raw_writew writew |
---|
434 | #define __raw_writel writel |
---|
435 | #define __raw_writeq writeq |
---|
436 | |
---|
437 | #ifndef inb_p |
---|
438 | # define inb_p inb |
---|
439 | #endif |
---|
440 | #ifndef inw_p |
---|
441 | # define inw_p inw |
---|
442 | #endif |
---|
443 | #ifndef inl_p |
---|
444 | # define inl_p inl |
---|
445 | #endif |
---|
446 | |
---|
447 | #ifndef outb_p |
---|
448 | # define outb_p outb |
---|
449 | #endif |
---|
450 | #ifndef outw_p |
---|
451 | # define outw_p outw |
---|
452 | #endif |
---|
453 | #ifndef outl_p |
---|
454 | # define outl_p outl |
---|
455 | #endif |
---|
456 | |
---|
457 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); |
---|
458 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); |
---|
459 | |
---|
/*
 * iounmap - intentionally a no-op.  ioremap() here hands out address
 * cookies (see the MMIO/PIO cookie comment earlier in this file), so
 * there appears to be no per-mapping state to tear down -- NOTE(review):
 * confirm against this tree's ioremap() implementation.
 */
static inline void
iounmap (volatile void __iomem *addr)
{
}
---|
464 | |
---|
465 | /* Use normal IO mappings for DMI */ |
---|
466 | #define dmi_ioremap ioremap |
---|
467 | #define dmi_iounmap(x,l) iounmap(x) |
---|
468 | #define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) |
---|
469 | |
---|
470 | # ifdef __KERNEL__ |
---|
471 | |
---|
472 | /* |
---|
473 | * String version of IO memory access ops: |
---|
474 | */ |
---|
475 | extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n); |
---|
476 | extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n); |
---|
477 | extern void memset_io(volatile void __iomem *s, int c, long n); |
---|
478 | |
---|
479 | #define dma_cache_inv(_start,_size) do { } while (0) |
---|
480 | #define dma_cache_wback(_start,_size) do { } while (0) |
---|
481 | #define dma_cache_wback_inv(_start,_size) do { } while (0) |
---|
482 | |
---|
483 | # endif /* __KERNEL__ */ |
---|
484 | |
---|
485 | /* |
---|
486 | * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that |
---|
487 | * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64). |
---|
488 | * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on |
---|
489 | * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing |
---|
490 | * over BIO-level virtual merging. |
---|
491 | */ |
---|
492 | extern unsigned long ia64_max_iommu_merge_mask; |
---|
493 | #if 1 |
---|
494 | #define BIO_VMERGE_BOUNDARY 0 |
---|
495 | #else |
---|
496 | /* |
---|
497 | * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be |
---|
498 | * replaced by dma_merge_mask() or something of that sort. Note: the only way |
---|
499 | * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets |
---|
500 | * expanded into: |
---|
501 | * |
---|
502 | * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask) |
---|
503 | * |
---|
504 | * which is precisely what we want. |
---|
505 | */ |
---|
506 | #define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1) |
---|
507 | #endif |
---|
508 | |
---|
509 | #endif /* _ASM_IA64_IO_H */ |
---|