#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/string.h>
#include <linux/compiler.h>

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define IO_SPACE_LIMIT 0xffff

#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000	/* 256k per quad. */

#ifdef __KERNEL__

#include <asm-generic/iomap.h>

#include <linux/vmalloc.h>
#include <asm/fixmap.h>

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p, sz)	ioremap(p, sz)
#define xlate_dev_mem_ptr_unmap(p)	iounmap(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void * phys_to_virt(unsigned long address)
{
	return __va(address);
}
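
/*
 * Illustrative sketch (not part of this header): virt_to_phys() is only
 * meaningful for directly mapped memory such as a kmalloc() allocation,
 * and phys_to_virt() is its inverse on such addresses. The buffer below
 * is hypothetical.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys;
 *
 *	if (buf) {
 *		phys = virt_to_phys(buf);
 *		BUG_ON(phys_to_virt(phys) != buf);
 *	}
 */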

/*
 * Change "struct page" to physical address.
 */
#define page_to_pseudophys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#define page_to_phys(page)		(phys_to_machine(page_to_pseudophys(page)))
#define page_to_bus(page)		(phys_to_machine(page_to_pseudophys(page)))

#define bio_to_pseudophys(bio)	(page_to_pseudophys(bio_page((bio))) + \
				 (unsigned long) bio_offset((bio)))
#define bvec_to_pseudophys(bv)	(page_to_pseudophys((bv)->bv_page) + \
				 (unsigned long) (bv)->bv_offset)

#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
	  bvec_to_pseudophys((vec2))))

extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */

static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, 0);
}

extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
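
/*
 * Illustrative sketch (not part of this header): a typical MMIO setup
 * maps a bus address with ioremap(), accesses the registers through
 * readl()/writel(), and tears the mapping down with iounmap(). The base
 * address, length and register offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap(0xfebf0000, 0x100);
 *	unsigned int id;
 *
 *	if (regs) {
 *		id = readl(regs + 0x00);	// e.g. a device ID register
 *		iounmap(regs);
 *	}
 */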

/*
 * bt_ioremap() and bt_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 * A boot-time mapping is currently limited to at most 16 pages.
 */
extern void *bt_ioremap(unsigned long offset, unsigned long size);
extern void bt_iounmap(void *addr, unsigned long size);

/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap bt_ioremap
#define dmi_iounmap bt_iounmap
#define dmi_alloc alloc_bootmem

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus(_x) phys_to_machine(__pa(_x))
#define bus_to_virt(_x) __va(machine_to_phys(_x))

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */

static inline unsigned char readb(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *) addr;
}
static inline unsigned short readw(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}
static inline unsigned int readl(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl

static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = b;
}
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = b;
}
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

#define mmiowb()

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	__memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	__memcpy((void __force *) dst, src, count);
}
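
/*
 * Illustrative sketch (not part of this header): on i386 the readX/writeX
 * helpers compile down to plain loads and stores through the __iomem
 * cookie, and memcpy_fromio() simply copies out of the mapping. The
 * register offsets and buffer below are hypothetical.
 *
 *	u32 status = readl(regs + 0x04);
 *	writel(status | 0x1, regs + 0x04);	// set a hypothetical enable bit
 *
 *	char fw_version[16];
 *	memcpy_fromio(fw_version, regs + 0x40, sizeof(fw_version));
 */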

/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))

/*
 * Again, i386 does not require a mem-IO-specific function.
 */

#define eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void __force *)(b),(c),(d))

/**
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr. This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */

static inline int check_signature(volatile void __iomem * io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
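
/*
 * Illustrative sketch (not part of this header): probing an ioremap()ed
 * ROM window for a signature. The window address, length and signature
 * string are only examples.
 *
 *	void __iomem *rom = ioremap(0xc0000, 0x1000);
 *
 *	if (rom && check_signature(rom, (const unsigned char *)"PCIR", 4)) {
 *		// signature found at the start of the window
 *	}
 *	if (rom)
 *		iounmap(rom);
 */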

/*
 * Cache management
 *
 * This is needed for two cases:
 * 1. Out of order aware processors
 * 2. Accidentally out of order processors (PPro errata #51)
 */

#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)

static inline void flush_write_buffers(void)
{
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
}

#define dma_cache_inv(_start,_size)		flush_write_buffers()
#define dma_cache_wback(_start,_size)		flush_write_buffers()
#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()

#else

/* Nothing to do */

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)
#define flush_write_buffers()

#endif
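
/*
 * Illustrative sketch (not part of this header): code that fills a buffer
 * with the CPU and then notifies the device may call flush_write_buffers()
 * in between, so the stores are ordered before the doorbell write on
 * OOSTORE/PPro-fence configurations (it is a no-op otherwise). The buffer,
 * doorbell register and constants below are hypothetical.
 *
 *	memcpy(dma_buf, data, len);
 *	flush_write_buffers();
 *	writel(DMA_START, regs + DOORBELL_REG);
 */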

#endif /* __KERNEL__ */

#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
#else
#define __SLOW_DOWN_IO "outb %%al,$0x80;"
#endif

static inline void slow_down_io(void) {
	__asm__ __volatile__(
		__SLOW_DOWN_IO
#ifdef REALLY_SLOW_IO
		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
		: : );
}

#ifdef CONFIG_X86_NUMAQ
extern void *xquad_portio;	/* Where the IO area was mapped */
#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
#define __BUILDIO(bwl,bw,type) \
static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
	if (xquad_portio) \
		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
	else \
		out##bwl##_local(value, port); \
} \
static inline void out##bwl(unsigned type value, int port) { \
	out##bwl##_quad(value, port, 0); \
} \
static inline unsigned type in##bwl##_quad(int port, int quad) { \
	if (xquad_portio) \
		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
	else \
		return in##bwl##_local(port); \
} \
static inline unsigned type in##bwl(int port) { \
	return in##bwl##_quad(port, 0); \
}
#else
#define __BUILDIO(bwl,bw,type) \
static inline void out##bwl(unsigned type value, int port) { \
	out##bwl##_local(value, port); \
} \
static inline unsigned type in##bwl(int port) { \
	return in##bwl##_local(port); \
}
#endif


#define BUILDIO(bwl,bw,type) \
static inline void out##bwl##_local(unsigned type value, int port) { \
	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
} \
static inline unsigned type in##bwl##_local(int port) { \
	unsigned type value; \
	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
	return value; \
} \
static inline void out##bwl##_local_p(unsigned type value, int port) { \
	out##bwl##_local(value, port); \
	slow_down_io(); \
} \
static inline unsigned type in##bwl##_local_p(int port) { \
	unsigned type value = in##bwl##_local(port); \
	slow_down_io(); \
	return value; \
} \
__BUILDIO(bwl,bw,type) \
static inline void out##bwl##_p(unsigned type value, int port) { \
	out##bwl(value, port); \
	slow_down_io(); \
} \
static inline unsigned type in##bwl##_p(int port) { \
	unsigned type value = in##bwl(port); \
	slow_down_io(); \
	return value; \
} \
static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
} \
static inline void ins##bwl(int port, void *addr, unsigned long count) { \
	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
}

BUILDIO(b,b,char)
BUILDIO(w,w,short)
BUILDIO(l,,int)
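
/*
 * Illustrative sketch (not part of this header): the BUILDIO() expansions
 * above provide inb/inw/inl, outb/outw/outl, their "pausing" _p variants
 * and the string forms insb/outsb and friends. The conventional CMOS/RTC
 * index and data ports are used here purely as an example.
 *
 *	unsigned char sec;
 *
 *	outb_p(0x00, 0x70);	// select RTC seconds register via index port
 *	sec = inb_p(0x71);	// read it back from the data port
 */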

/* We will be supplying our own /dev/mem implementation */
#define ARCH_HAS_DEV_MEM

#endif /* _ASM_IO_H */
---|