#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <asm/fixmap.h>

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define __SLOW_DOWN_IO "\noutb %%al,$0x80"

#ifdef REALLY_SLOW_IO
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#else
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
#endif

/*
 * Talk about misusing macros..
 */
#define __OUT1(s,x) \
static inline void out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \

#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,i...) \
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \

#define __INS(s) \
static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define RETURN_TYPE unsigned char
__IN(b,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
__IN(w,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
__IN(l,"")
#undef RETURN_TYPE

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)

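/*
 * For reference, the expansions above provide the usual port-I/O helpers,
 * roughly equivalent to prototypes of the form:
 *
 *	unsigned char inb(unsigned short port);
 *	void outb(unsigned char value, unsigned short port);
 *	void insw(unsigned short port, void *addr, unsigned long count);
 *	void outsl(unsigned short port, const void *addr, unsigned long count);
 *
 * plus the w/l variants and the _p ("pausing") forms that append
 * __FULL_SLOW_DOWN_IO.  A minimal usage sketch (hypothetical driver code,
 * not part of this header; dev_base, tx_buf and len are assumed locals):
 *
 *	u8 status = inb(dev_base + 1);
 *	outsw(dev_base, tx_buf, len / 2);
 */
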
#define IO_SPACE_LIMIT 0xffff

#if defined(__KERNEL__) && __x86_64__

#include <linux/vmalloc.h>

#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa(address);
}

static inline void * phys_to_virt(unsigned long address)
{
	return __va(address);
}

#define virt_to_bus(_x) phys_to_machine(__pa(_x))
#define bus_to_virt(_x) __va(machine_to_phys(_x))
#endif
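
/*
 * Illustrative only (not part of the original header): for direct-mapped
 * kernel memory the two helpers above are inverses, e.g.
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);	// assumed buffer
 *	unsigned long phys = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(phys) != buf);
 *
 * Note that virt_to_bus()/bus_to_virt() additionally translate through
 * phys_to_machine()/machine_to_phys(), so the two pairs are not
 * interchangeable here.
 */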

/*
 * Change "struct page" to physical address.
 */
#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))

#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
				  (unsigned long) bio_offset((bio)))
#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
				  (unsigned long) (bv)->bv_offset)

#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
	  bvec_to_pseudophys((vec2))))

#include <asm-generic/iomap.h>

extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);

static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, 0);
}

extern void *bt_ioremap(unsigned long addr, unsigned long size);
extern void bt_iounmap(void *addr, unsigned long size);
#define early_ioremap bt_ioremap
#define early_iounmap bt_iounmap

/*
 * This one maps high address device memory and turns off caching for that
 * area.  It is useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
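
/*
 * A minimal, illustrative mapping sequence (hypothetical driver code; pdev
 * and the BAR index are assumptions, not part of this header):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 *
 * The plain ioremap() wrapper above is the same call with flags 0.
 */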

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */

#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))

/*
 * However, PCI bus addresses are not necessarily 1:1, and therefore these
 * interfaces are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus(_x) phys_to_machine(__pa(_x))
#define bus_to_virt(_x) __va(machine_to_phys(_x))

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */

static inline __u8 __readb(const volatile void __iomem *addr)
{
	return *(__force volatile __u8 *)addr;
}
static inline __u16 __readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)addr;
}
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)addr;
}
static inline __u64 __readq(const volatile void __iomem *addr)
{
	return *(__force volatile __u64 *)addr;
}
#define readb(x) __readb(x)
#define readw(x) __readw(x)
#define readl(x) __readl(x)
#define readq(x) __readq(x)
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq

#define mmiowb()

static inline void __writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)addr = b;
}
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
	*(__force volatile __u64 *)addr = b;
}
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)addr = b;
}
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)addr = b;
}
#define writeq(val,addr) __writeq((val),(addr))
#define writel(val,addr) __writel((val),(addr))
#define writew(val,addr) __writew((val),(addr))
#define writeb(val,addr) __writeb((val),(addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq

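/*
 * A brief MMIO access sketch (hypothetical driver code; regs is assumed to
 * come from ioremap_nocache() above, STATUS_REG/CTRL_REG/ENABLE_BIT are
 * assumed device-specific constants):
 *
 *	u32 status = readl(regs + STATUS_REG);
 *	writel(status | ENABLE_BIT, regs + CTRL_REG);
 *
 * As the inline helpers show, on x86-64 these are plain loads and stores
 * through a volatile-qualified pointer.
 */
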
void __memcpy_fromio(void*,unsigned long,unsigned);
void __memcpy_toio(unsigned long,const void*,unsigned);

static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
{
	__memcpy_fromio(to,(unsigned long)from,len);
}
static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
{
	__memcpy_toio((unsigned long)to,from,len);
}

void memset_io(volatile void __iomem *a, int b, size_t c);

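/*
 * The bulk helpers in one illustrative sketch (hypothetical names; regs,
 * SRAM_OFF, fw_image, fw_len and readback are assumptions):
 *
 *	memset_io(regs + SRAM_OFF, 0, fw_len);		  // clear device SRAM
 *	memcpy_toio(regs + SRAM_OFF, fw_image, fw_len);	  // upload firmware
 *	memcpy_fromio(readback, regs + SRAM_OFF, fw_len); // read back to verify
 */
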
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))

/*
 * Again, x86-64 does not require memory-IO-specific functions.
 */

#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))

/**
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr. This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */

static inline int check_signature(void __iomem *io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
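
/*
 * Illustrative use of check_signature() (hypothetical values; the mapped
 * range and signature string are assumptions, not part of this header):
 *
 *	void __iomem *p = ioremap(0xf0000, 0x10000);	// assumed BIOS ROM window
 *	if (p && check_signature(p + 0x1234, "$FOO", 4))
 *		printk(KERN_INFO "FOO extension ROM found\n");
 *	if (p)
 *		iounmap(p);
 */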

/* Nothing to do */

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

#define flush_write_buffers()

extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p, sz)	ioremap(p, sz)
#define xlate_dev_mem_ptr_unmap(p)	iounmap(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#define ARCH_HAS_DEV_MEM

#endif