#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#else
#ifdef CONFIG_XEN
#define phys_pkg_id(a,b) a
#endif
#endif
#include <asm/hypervisor.h>

#include "cpu.h"

DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);

#ifndef CONFIG_XEN
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
#endif

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int disable_x86_sep __cpuinitdata;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init	= default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
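/*
 * Usage example: booting with "cachesize=512" on the kernel command line
 * makes display_cacheinfo() below report (and record) a 512 KB L2 cache
 * regardless of what the hardware claims.
 */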
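
/*
 * Build the CPU brand string from extended CPUID levels 0x80000002..4:
 * each level returns 16 bytes in EAX/EBX/ECX/EDX, 48 bytes in all,
 * terminated here at x86_model_id[48].
 */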
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

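/*
 * AMD-style cache reporting: CPUID 0x80000005 describes the L1 caches
 * (ECX = D-cache, EDX = I-cache; bits 31-24 = size in KB, bits 7-0 =
 * line size in bytes) and 0x80000006 the L2 (ECX bits 31-16 = size in
 * KB, bits 7-0 = line size).
 */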
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below hasn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}


static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}


static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);


static int __init x86_sep_setup(char * s)
{
	disable_x86_sep = 1;
	return 1;
}
__setup("nosep", x86_sep_setup);

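/*
 * The probe below flips the requested bit in EFLAGS and checks whether
 * the change sticks.  A 386 cannot toggle AC, a 486 can; and a CPU that
 * can toggle ID (bit 21) implements the CPUID instruction, which is
 * what have_cpuid_p() relies on.
 */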
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask,
   cache alignment.  The others are not touched to avoid unwanted
   side effects.

   WARNING: this function is only called on the BP.  Don't add code
   here that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
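		/* CPUID.1:EDX bit 19 = CLFLUSH supported; CPUID.1:EBX bits
		 * 15-8 give the CLFLUSH line size in 8-byte units, hence
		 * the multiply below. */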
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}

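/*
 * Signature decode used here and in early_cpu_detect(): CPUID.1:EAX
 * holds the family in bits 11-8 (extended by bits 27-20 when the base
 * family is 0xf), the model in bits 7-4 (extended by bits 19-16 << 4
 * from family 6 on), and the stepping ("mask") in bits 3-0.
 */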
void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int ebx;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
			c->apicid = (ebx >> 24) & 0xFF;
#endif
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
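/*
 * Note the inverted sense: the PSN is disabled by default, and booting
 * with "serialnumber" on the command line leaves it readable.
 */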


/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	/* SEP disabled? */
	if (disable_x86_sep)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
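/*
 * Split the initial APIC ID (CPUID.1:EBX bits 31-24) into its package
 * and core components: the bits above get_count_order(siblings) select
 * the physical package, and the core_bits directly below them select
 * the core within that package.
 */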
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
					"siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
			((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

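/*
 * Hand this CPU's GDT to the hypervisor.  Xen requires the frames
 * backing a registered GDT to be read-only, so each page is remapped
 * R/O and then passed by machine frame number; the entry count is the
 * byte size divided by the 8-byte descriptor size.
 */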
void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
{
	unsigned long frames[16];
	unsigned long va;
	int f;

	for (va = gdt_descr->address, f = 0;
	     va < gdt_descr->address + gdt_descr->size;
	     va += PAGE_SIZE, f++) {
		frames[f] = virt_to_mfn(va);
		make_lowmem_page_readonly(
			(void *)va, XENFEAT_writable_descriptor_tables);
	}
	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
		BUG();
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct * t = &per_cpu(init_tss, cpu);
#endif
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt;
#ifndef CONFIG_XEN
	/* Base of this CPU's 16-bit stack (as in the stock i386
	 * cpu_init()); patched into the ESPFIX descriptor below. */
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
#endif
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

#ifndef CONFIG_XEN
	/* The CPU hotplug case */
	if (cpu_gdt_descr->address) {
		gdt = (struct desc_struct *)cpu_gdt_descr->address;
		memset(gdt, 0, PAGE_SIZE);
		goto old_gdt;
	}
	/*
	 * This is a horrible hack to allocate the GDT.  The problem
	 * is that cpu_init() is called really early for the boot CPU
	 * (and hence needs bootmem) but much later for the secondary
	 * CPUs, when bootmem will have gone away
	 */
	if (NODE_DATA(0)->bdata->node_bootmem_map) {
		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
		/* alloc_bootmem_pages panics on failure, so no check */
		memset(gdt, 0, PAGE_SIZE);
	} else {
		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
		if (unlikely(!gdt)) {
			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
			for (;;)
				local_irq_enable();
		}
	}
old_gdt:
	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);

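	/*
	 * A GDT entry scatters its base address: descriptor bits 16-39
	 * hold base[23:0] and bits 56-63 hold base[31:24], hence the two
	 * masked shifts of stk16_off; the OR of CPU_16BIT_STACK_SIZE - 1
	 * fills in the segment limit in the low word.
	 */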
	/* Set up GDT entry for 16bit stack */
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);

	cpu_gdt_descr->size = GDT_SIZE - 1;
	cpu_gdt_descr->address = (unsigned long)gdt;
#else
	if (cpu == 0 && cpu_gdt_descr->address == 0) {
		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
		/* alloc_bootmem_pages panics on failure, so no check */
		memset(gdt, 0, PAGE_SIZE);

		memcpy(gdt, cpu_gdt_table, GDT_SIZE);

		cpu_gdt_descr->size = GDT_SIZE;
		cpu_gdt_descr->address = (unsigned long)gdt;
	}
#endif

	cpu_gdt_init(cpu_gdt_descr);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);

	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif
---|