Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--  arch/i386/kernel/cpu/amd.c                          |  22
-rw-r--r--  arch/i386/kernel/cpu/centaur.c                      |  12
-rw-r--r--  arch/i386/kernel/cpu/changelog                      |  63
-rw-r--r--  arch/i386/kernel/cpu/common.c                       |  72
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig                |   1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c         |  76
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c          |   9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c          |   4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c   |  17
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h |  25
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                        |  43
-rw-r--r--  arch/i386/kernel/cpu/intel.c                        |   5
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c              |  12
-rw-r--r--  arch/i386/kernel/cpu/mtrr/changelog                 | 229
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c                      |   1
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c                    |  15
-rw-r--r--  arch/i386/kernel/cpu/nexgen.c                       |   8
-rw-r--r--  arch/i386/kernel/cpu/proc.c                         |  27
-rw-r--r--  arch/i386/kernel/cpu/rise.c                         |   8
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c                    |  11
-rw-r--r--  arch/i386/kernel/cpu/umc.c                          |   8
21 files changed, 219 insertions, 449 deletions
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index e344ef8..0810f81 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c) set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); break; } - break; + if (c->x86_model == 10) { + /* AMD Geode LX is model 10 */ + /* placeholder for any needed mods */ + break; + } + break; case 6: /* An Athlon/Duron */ /* Bit 15 of Athlon specific MSR 15, needs to be 0 @@ -211,6 +216,12 @@ static void __init init_amd(struct cpuinfo_x86 *c) c->x86_max_cores = 1; } + if (cpuid_eax(0x80000000) >= 0x80000007) { + c->x86_power = cpuid_edx(0x80000007); + if (c->x86_power & (1<<8)) + set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + } + #ifdef CONFIG_X86_HT /* * On a AMD dual core setup the lower bits of the APIC id @@ -228,6 +239,7 @@ static void __init init_amd(struct cpuinfo_x86 *c) cpu, c->x86_max_cores, cpu_core_id[cpu]); } #endif + } static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) @@ -270,3 +282,11 @@ int __init amd_init_cpu(void) } //early_arch_initcall(amd_init_cpu); + +static int __init amd_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_AMD] = NULL; + return 0; +} + +late_initcall(amd_exit_cpu); diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c index 394814e..f52669e 100644 --- a/arch/i386/kernel/cpu/centaur.c +++ b/arch/i386/kernel/cpu/centaur.c @@ -405,10 +405,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c) winchip2_protect_mcr(); #endif break; - case 10: - name="4"; - /* no info on the WC4 yet */ - break; default: name="??"; } @@ -474,3 +470,11 @@ int __init centaur_init_cpu(void) } //early_arch_initcall(centaur_init_cpu); + +static int __init centaur_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_CENTAUR] = NULL; + return 0; +} + +late_initcall(centaur_exit_cpu); diff --git a/arch/i386/kernel/cpu/changelog b/arch/i386/kernel/cpu/changelog deleted file mode 100644 index cef76b8..0000000 --- a/arch/i386/kernel/cpu/changelog +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean - * and Martin Mares, November 1997. - * - * Force Cyrix 6x86(MX) and M II processors to report MTRR capability - * and Cyrix "coma bug" recognition by - * Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999. - * - * Force Centaur C6 processors to report MTRR capability. - * Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999. - * - * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999. - * - * IDT Winchip tweaks, misc clean ups. - * Dave Jones <davej@suse.de>, August 1999 - * - * Better detection of Centaur/IDT WinChip models. - * Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999. - * - * Cleaned up cache-detection code - * Dave Jones <davej@suse.de>, October 1999 - * - * Added proper L2 cache detection for Coppermine - * Dragan Stancevic <visitor@valinux.com>, October 1999 - * - * Added the original array for capability flags but forgot to credit - * myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff - * Jauder Ho <jauderho@carumba.com>, January 2000 - * - * Detection for Celeron coppermine, identify_cpu() overhauled, - * and a few other clean ups. 
- * Dave Jones <davej@suse.de>, April 2000 - * - * Pentium III FXSR, SSE support - * General FPU state handling cleanups - * Gareth Hughes <gareth@valinux.com>, May 2000 - * - * Added proper Cascades CPU and L2 cache detection for Cascades - * and 8-way type cache happy bunch from Intel:^) - * Dragan Stancevic <visitor@valinux.com>, May 2000 - * - * Forward port AMD Duron errata T13 from 2.2.17pre - * Dave Jones <davej@suse.de>, August 2000 - * - * Forward port lots of fixes/improvements from 2.2.18pre - * Cyrix III, Pentium IV support. - * Dave Jones <davej@suse.de>, October 2000 - * - * Massive cleanup of CPU detection and bug handling; - * Transmeta CPU detection, - * H. Peter Anvin <hpa@zytor.com>, November 2000 - * - * VIA C3 Support. - * Dave Jones <davej@suse.de>, March 2001 - * - * AMD Athlon/Duron/Thunderbird bluesmoke support. - * Dave Jones <davej@suse.de>, April 2001. - * - * CacheSize bug workaround updates for AMD, Intel & VIA Cyrix. - * Dave Jones <davej@suse.de>, September, October 2001. - * - */ - diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 31e344b..e6bd095 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -4,6 +4,7 @@ #include <linux/smp.h> #include <linux/module.h> #include <linux/percpu.h> +#include <linux/bootmem.h> #include <asm/semaphore.h> #include <asm/processor.h> #include <asm/i387.h> @@ -18,8 +19,8 @@ #include "cpu.h" -DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); -EXPORT_PER_CPU_SYMBOL(cpu_gdt_table); +DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); +EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); @@ -47,6 +48,7 @@ static void default_init(struct cpuinfo_x86 * c) static struct cpu_dev default_cpu = { .c_init = default_init, + .c_vendor = "Unknown", }; static struct cpu_dev * this_cpu = &default_cpu; @@ -153,6 +155,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) { char *v = c->x86_vendor_id; int i; + static int printed; for (i = 0; i < X86_VENDOR_NUM; i++) { if (cpu_devs[i]) { @@ -162,10 +165,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) c->x86_vendor = i; if (!early) this_cpu = cpu_devs[i]; - break; + return; } } } + if (!printed) { + printed++; + printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); + printk(KERN_ERR "CPU: Your system may be unstable.\n"); + } + c->x86_vendor = X86_VENDOR_UNKNOWN; + this_cpu = &default_cpu; } @@ -207,7 +217,10 @@ static int __devinit have_cpuid_p(void) /* Do minimum CPU detection early. Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. - The others are not touched to avoid unwanted side effects. */ + The others are not touched to avoid unwanted side effects. + + WARNING: this function is only called on the BP. Don't add code here + that is supposed to run on all CPUs. 
*/ static void __init early_cpu_detect(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -239,12 +252,6 @@ static void __init early_cpu_detect(void) if (cap0 & (1<<19)) c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; } - - early_intel_workaround(c); - -#ifdef CONFIG_X86_HT - phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; -#endif } void __devinit generic_identify(struct cpuinfo_x86 * c) @@ -271,10 +278,10 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) c->x86_capability[4] = excap; c->x86 = (tfms >> 8) & 15; c->x86_model = (tfms >> 4) & 15; - if (c->x86 == 0xf) { + if (c->x86 == 0xf) c->x86 += (tfms >> 20) & 0xff; + if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; - } c->x86_mask = tfms & 15; } else { /* Have CPUID level 0 only - unheard of */ @@ -292,6 +299,12 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) get_model_name(c); /* Default name */ } } + + early_intel_workaround(c); + +#ifdef CONFIG_X86_HT + phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; +#endif } static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) @@ -562,8 +575,9 @@ void __devinit cpu_init(void) int cpu = smp_processor_id(); struct tss_struct * t = &per_cpu(init_tss, cpu); struct thread_struct *thread = ¤t->thread; - struct desc_struct *gdt = get_cpu_gdt_table(cpu); + struct desc_struct *gdt; __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); + struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); if (cpu_test_and_set(cpu, cpu_initialized)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); @@ -581,6 +595,25 @@ void __devinit cpu_init(void) } /* + * This is a horrible hack to allocate the GDT. The problem + * is that cpu_init() is called really early for the boot CPU + * (and hence needs bootmem) but much later for the secondary + * CPUs, when bootmem will have gone away + */ + if (NODE_DATA(0)->bdata->node_bootmem_map) { + gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE); + /* alloc_bootmem_pages panics on failure, so no check */ + memset(gdt, 0, PAGE_SIZE); + } else { + gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL); + if (unlikely(!gdt)) { + printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); + for (;;) + local_irq_enable(); + } + } + + /* * Initialize the per-CPU GDT with the boot GDT, * and set up the GDT descriptor: */ @@ -592,18 +625,13 @@ void __devinit cpu_init(void) ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | (CPU_16BIT_STACK_SIZE - 1); - cpu_gdt_descr[cpu].size = GDT_SIZE - 1; - cpu_gdt_descr[cpu].address = (unsigned long)gdt; + cpu_gdt_descr->size = GDT_SIZE - 1; + cpu_gdt_descr->address = (unsigned long)gdt; - load_gdt(&cpu_gdt_descr[cpu]); + load_gdt(cpu_gdt_descr); load_idt(&idt_descr); /* - * Delete NT - */ - __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); - - /* * Set up and load the per-CPU TSS and LDT */ atomic_inc(&init_mm.mm_count); @@ -617,8 +645,10 @@ void __devinit cpu_init(void) load_TR_desc(); load_LDT(&init_mm.context); +#ifdef CONFIG_DOUBLEFAULT /* Set up doublefault TSS pointer in the GDT */ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); +#endif /* Clear %fs and %gs. 
*/ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig index 0f1eb50..26892d2 100644 --- a/arch/i386/kernel/cpu/cpufreq/Kconfig +++ b/arch/i386/kernel/cpu/cpufreq/Kconfig @@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI config X86_GX_SUSPMOD tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" + depends on PCI help This add the CPUFreq driver for NatSemi Geode processors which support suspend modulation. diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index 871366b..3852d0a 100644 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -40,8 +40,6 @@ #include <linux/acpi.h> #include <acpi/processor.h> -#include "speedstep-est-common.h" - #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); @@ -297,68 +295,6 @@ acpi_cpufreq_guess_freq ( } -/* - * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities - * of this driver - * @perf: processor-specific acpi_io_data struct - * @cpu: CPU being initialized - * - * To avoid issues with legacy OSes, some BIOSes require to be informed of - * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC - * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in - * driver/acpi/processor.c - */ -static void -acpi_processor_cpu_init_pdc_est( - struct acpi_processor_performance *perf, - unsigned int cpu, - struct acpi_object_list *obj_list - ) -{ - union acpi_object *obj; - u32 *buf; - struct cpuinfo_x86 *c = cpu_data + cpu; - dprintk("acpi_processor_cpu_init_pdc_est\n"); - - if (!cpu_has(c, X86_FEATURE_EST)) - return; - - /* Initialize pdc. It will be used later. 
*/ - if (!obj_list) - return; - - if (!(obj_list->count && obj_list->pointer)) - return; - - obj = obj_list->pointer; - if ((obj->buffer.length == 12) && obj->buffer.pointer) { - buf = (u32 *)obj->buffer.pointer; - buf[0] = ACPI_PDC_REVISION_ID; - buf[1] = 1; - buf[2] = ACPI_PDC_EST_CAPABILITY_SMP; - perf->pdc = obj_list; - } - return; -} - - -/* CPU specific PDC initialization */ -static void -acpi_processor_cpu_init_pdc( - struct acpi_processor_performance *perf, - unsigned int cpu, - struct acpi_object_list *obj_list - ) -{ - struct cpuinfo_x86 *c = cpu_data + cpu; - dprintk("acpi_processor_cpu_init_pdc\n"); - perf->pdc = NULL; - if (cpu_has(c, X86_FEATURE_EST)) - acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list); - return; -} - - static int acpi_cpufreq_cpu_init ( struct cpufreq_policy *policy) @@ -367,15 +303,9 @@ acpi_cpufreq_cpu_init ( unsigned int cpu = policy->cpu; struct cpufreq_acpi_io *data; unsigned int result = 0; - - union acpi_object arg0 = {ACPI_TYPE_BUFFER}; - u32 arg0_buf[3]; - struct acpi_object_list arg_list = {1, &arg0}; + struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; dprintk("acpi_cpufreq_cpu_init\n"); - /* setup arg_list for _PDC settings */ - arg0.buffer.length = 12; - arg0.buffer.pointer = (u8 *) arg0_buf; data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) @@ -383,14 +313,12 @@ acpi_cpufreq_cpu_init ( acpi_io_data[cpu] = data; - acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list); result = acpi_processor_register_performance(&data->acpi_data, cpu); - data->acpi_data.pdc = NULL; if (result) goto err_free; - if (is_const_loops_cpu(cpu)) { + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; } diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c index 270f218..cc73a7a 100644 --- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c @@ -52,6 +52,7 @@ enum { static int has_N44_O17_errata[NR_CPUS]; +static int has_N60_errata[NR_CPUS]; static unsigned int stock_freq; static struct cpufreq_driver p4clockmod_driver; static unsigned int cpufreq_p4_get(unsigned int cpu); @@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) case 0x0f12: has_N44_O17_errata[policy->cpu] = 1; dprintk("has errata -- disabling low frequencies\n"); + break; + + case 0x0f29: + has_N60_errata[policy->cpu] = 1; + dprintk("has errata -- disabling frequencies lower than 2ghz\n"); + break; } /* get max frequency */ @@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { if ((i<2) && (has_N44_O17_errata[policy->cpu])) p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000) + p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; else p4clockmod_table[i].frequency = (stock_freq * i)/8; } diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 0fbbd4c..e11a092 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -980,7 +980,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol) } /* per CPU init entry point to the driver */ -static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) +static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; cpumask_t oldmask = CPU_MASK_ALL; @@ 
-1141,7 +1141,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = { }; /* driver entry point for init */ -static int __init powernowk8_init(void) +static int __cpuinit powernowk8_init(void) { unsigned int i, supported_cpus = 0; diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c index edb9873..c173c0f 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c @@ -35,8 +35,6 @@ #include <asm/processor.h> #include <asm/cpufeature.h> -#include "speedstep-est-common.h" - #define PFX "speedstep-centrino: " #define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>" @@ -364,22 +362,10 @@ static struct acpi_processor_performance p; */ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { - union acpi_object arg0 = {ACPI_TYPE_BUFFER}; - u32 arg0_buf[3]; - struct acpi_object_list arg_list = {1, &arg0}; unsigned long cur_freq; int result = 0, i; unsigned int cpu = policy->cpu; - /* _PDC settings */ - arg0.buffer.length = 12; - arg0.buffer.pointer = (u8 *) arg0_buf; - arg0_buf[0] = ACPI_PDC_REVISION_ID; - arg0_buf[1] = 1; - arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR; - - p.pdc = &arg_list; - /* register with ACPI core */ if (acpi_processor_register_performance(&p, cpu)) { dprintk(KERN_INFO PFX "obtaining ACPI data failed\n"); @@ -493,12 +479,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) unsigned l, h; int ret; int i; + struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; /* Only Intel makes Enhanced Speedstep-capable CPUs */ if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) return -ENODEV; - if (is_const_loops_cpu(policy->cpu)) { + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { centrino_driver.flags |= CPUFREQ_CONST_LOOPS; } diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h b/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h deleted file mode 100644 index 5ce995c..0000000 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Routines common for drivers handling Enhanced Speedstep Technology - * Copyright (C) 2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> - * - * Licensed under the terms of the GNU GPL License version 2 -- see - * COPYING for details. - */ - -static inline int is_const_loops_cpu(unsigned int cpu) -{ - struct cpuinfo_x86 *c = cpu_data + cpu; - - if (c->x86_vendor != X86_VENDOR_INTEL || !cpu_has(c, X86_FEATURE_EST)) - return 0; - - /* - * on P-4s, the TSC runs with constant frequency independent of cpu freq - * when we use EST - */ - if (c->x86 == 0xf) - return 1; - - return 0; -} - diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c index ff87cc2..00f2e05 100644 --- a/arch/i386/kernel/cpu/cyrix.c +++ b/arch/i386/kernel/cpu/cyrix.c @@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) } /* + * Handle National Semiconductor branded processors + */ +static void __init init_nsc(struct cpuinfo_x86 *c) +{ + /* There may be GX1 processors in the wild that are branded + * NSC and not Cyrix. + * + * This function only handles the GX processor, and kicks every + * thing else to the Cyrix init function above - that should + * cover any processors that might have been branded differently + * after NSC aquired Cyrix. + * + * If this breaks your GX1 horribly, please e-mail + * info-linux@ldcmail.amd.com to tell us. 
+ */ + + /* Handle the GX (Formally known as the GX2) */ + + if (c->x86 == 5 && c->x86_model == 5) + display_cacheinfo(c); + else + init_cyrix(c); +} + +/* * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected * by the fact that they preserve the flags across the division of 5/2. * PII and PPro exhibit this behavior too, but they have cpuid available. @@ -419,10 +444,18 @@ int __init cyrix_init_cpu(void) //early_arch_initcall(cyrix_init_cpu); +static int __init cyrix_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_CYRIX] = NULL; + return 0; +} + +late_initcall(cyrix_exit_cpu); + static struct cpu_dev nsc_cpu_dev __initdata = { .c_vendor = "NSC", .c_ident = { "Geode by NSC" }, - .c_init = init_cyrix, + .c_init = init_nsc, .c_identify = generic_identify, }; @@ -433,3 +466,11 @@ int __init nsc_init_cpu(void) } //early_arch_initcall(nsc_init_cpu); + +static int __init nsc_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_NSC] = NULL; + return 0; +} + +late_initcall(nsc_exit_cpu); diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 5e2da70..8c01201 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c @@ -183,10 +183,13 @@ static void __devinit init_intel(struct cpuinfo_x86 *c) } #endif - if (c->x86 == 15) + if (c->x86 == 15) set_bit(X86_FEATURE_P4, c->x86_capability); if (c->x86 == 6) set_bit(X86_FEATURE_P3, c->x86_capability); + if ((c->x86 == 0xf && c->x86_model >= 0x03) || + (c->x86 == 0x6 && c->x86_model >= 0x0e)) + set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); } diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index fbfd374..ffe58ce 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -43,13 +43,23 @@ static struct _cache_table cache_table[] __cpuinitdata = { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ + { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ + { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ + { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */ { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */ + { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */ + { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */ + { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ + { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */ + { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ + { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */ + { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ @@ -57,6 +67,7 @@ static struct _cache_table cache_table[] 
__cpuinitdata = { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ + { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ @@ -141,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le return 0; } +/* will only be called once; __init is safe here */ static int __init find_num_cache_leaves(void) { unsigned int eax, ebx, ecx, edx; diff --git a/arch/i386/kernel/cpu/mtrr/changelog b/arch/i386/kernel/cpu/mtrr/changelog deleted file mode 100644 index af13685..0000000 --- a/arch/i386/kernel/cpu/mtrr/changelog +++ /dev/null @@ -1,229 +0,0 @@ - ChangeLog - - Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de> - Initial register-setting code (from proform-1.0). - 19971216 Richard Gooch <rgooch@atnf.csiro.au> - Original version for /proc/mtrr interface, SMP-safe. - v1.0 - 19971217 Richard Gooch <rgooch@atnf.csiro.au> - Bug fix for ioctls()'s. - Added sample code in Documentation/mtrr.txt - v1.1 - 19971218 Richard Gooch <rgooch@atnf.csiro.au> - Disallow overlapping regions. - 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de> - Register-setting fixups. - v1.2 - 19971222 Richard Gooch <rgooch@atnf.csiro.au> - Fixups for kernel 2.1.75. - v1.3 - 19971229 David Wragg <dpw@doc.ic.ac.uk> - Register-setting fixups and conformity with Intel conventions. - 19971229 Richard Gooch <rgooch@atnf.csiro.au> - Cosmetic changes and wrote this ChangeLog ;-) - 19980106 Richard Gooch <rgooch@atnf.csiro.au> - Fixups for kernel 2.1.78. - v1.4 - 19980119 David Wragg <dpw@doc.ic.ac.uk> - Included passive-release enable code (elsewhere in PCI setup). - v1.5 - 19980131 Richard Gooch <rgooch@atnf.csiro.au> - Replaced global kernel lock with private spinlock. - v1.6 - 19980201 Richard Gooch <rgooch@atnf.csiro.au> - Added wait for other CPUs to complete changes. - v1.7 - 19980202 Richard Gooch <rgooch@atnf.csiro.au> - Bug fix in definition of <set_mtrr> for UP. - v1.8 - 19980319 Richard Gooch <rgooch@atnf.csiro.au> - Fixups for kernel 2.1.90. - 19980323 Richard Gooch <rgooch@atnf.csiro.au> - Move SMP BIOS fixup before secondary CPUs call <calibrate_delay> - v1.9 - 19980325 Richard Gooch <rgooch@atnf.csiro.au> - Fixed test for overlapping regions: confused by adjacent regions - 19980326 Richard Gooch <rgooch@atnf.csiro.au> - Added wbinvd in <set_mtrr_prepare>. - 19980401 Richard Gooch <rgooch@atnf.csiro.au> - Bug fix for non-SMP compilation. - 19980418 David Wragg <dpw@doc.ic.ac.uk> - Fixed-MTRR synchronisation for SMP and use atomic operations - instead of spinlocks. - 19980418 Richard Gooch <rgooch@atnf.csiro.au> - Differentiate different MTRR register classes for BIOS fixup. - v1.10 - 19980419 David Wragg <dpw@doc.ic.ac.uk> - Bug fix in variable MTRR synchronisation. - v1.11 - 19980419 Richard Gooch <rgooch@atnf.csiro.au> - Fixups for kernel 2.1.97. - v1.12 - 19980421 Richard Gooch <rgooch@atnf.csiro.au> - Safer synchronisation across CPUs when changing MTRRs. - v1.13 - 19980423 Richard Gooch <rgooch@atnf.csiro.au> - Bugfix for SMP systems without MTRR support. - v1.14 - 19980427 Richard Gooch <rgooch@atnf.csiro.au> - Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines. - v1.15 - 19980427 Richard Gooch <rgooch@atnf.csiro.au> - Use atomic bitops for setting SMP change mask. 
- v1.16 - 19980428 Richard Gooch <rgooch@atnf.csiro.au> - Removed spurious diagnostic message. - v1.17 - 19980429 Richard Gooch <rgooch@atnf.csiro.au> - Moved register-setting macros into this file. - Moved setup code from init/main.c to i386-specific areas. - v1.18 - 19980502 Richard Gooch <rgooch@atnf.csiro.au> - Moved MTRR detection outside conditionals in <mtrr_init>. - v1.19 - 19980502 Richard Gooch <rgooch@atnf.csiro.au> - Documentation improvement: mention Pentium II and AGP. - v1.20 - 19980521 Richard Gooch <rgooch@atnf.csiro.au> - Only manipulate interrupt enable flag on local CPU. - Allow enclosed uncachable regions. - v1.21 - 19980611 Richard Gooch <rgooch@atnf.csiro.au> - Always define <main_lock>. - v1.22 - 19980901 Richard Gooch <rgooch@atnf.csiro.au> - Removed module support in order to tidy up code. - Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>. - Created addition queue for prior to SMP commence. - v1.23 - 19980902 Richard Gooch <rgooch@atnf.csiro.au> - Ported patch to kernel 2.1.120-pre3. - v1.24 - 19980910 Richard Gooch <rgooch@atnf.csiro.au> - Removed sanity checks and addition queue: Linus prefers an OOPS. - v1.25 - 19981001 Richard Gooch <rgooch@atnf.csiro.au> - Fixed harmless compiler warning in include/asm-i386/mtrr.h - Fixed version numbering and history for v1.23 -> v1.24. - v1.26 - 19990118 Richard Gooch <rgooch@atnf.csiro.au> - Added devfs support. - v1.27 - 19990123 Richard Gooch <rgooch@atnf.csiro.au> - Changed locking to spin with reschedule. - Made use of new <smp_call_function>. - v1.28 - 19990201 Zoltán Böszörményi <zboszor@mail.externet.hu> - Extended the driver to be able to use Cyrix style ARRs. - 19990204 Richard Gooch <rgooch@atnf.csiro.au> - Restructured Cyrix support. - v1.29 - 19990204 Zoltán Böszörményi <zboszor@mail.externet.hu> - Refined ARR support: enable MAPEN in set_mtrr_prepare() - and disable MAPEN in set_mtrr_done(). - 19990205 Richard Gooch <rgooch@atnf.csiro.au> - Minor cleanups. - v1.30 - 19990208 Zoltán Böszörményi <zboszor@mail.externet.hu> - Protect plain 6x86s (and other processors without the - Page Global Enable feature) against accessing CR4 in - set_mtrr_prepare() and set_mtrr_done(). - 19990210 Richard Gooch <rgooch@atnf.csiro.au> - Turned <set_mtrr_up> and <get_mtrr> into function pointers. - v1.31 - 19990212 Zoltán Böszörményi <zboszor@mail.externet.hu> - Major rewrite of cyrix_arr_init(): do not touch ARRs, - leave them as the BIOS have set them up. - Enable usage of all 8 ARRs. - Avoid multiplications by 3 everywhere and other - code clean ups/speed ups. - 19990213 Zoltán Böszörményi <zboszor@mail.externet.hu> - Set up other Cyrix processors identical to the boot cpu. - Since Cyrix don't support Intel APIC, this is l'art pour l'art. - Weigh ARRs by size: - If size <= 32M is given, set up ARR# we were given. - If size > 32M is given, set up ARR7 only if it is free, - fail otherwise. - 19990214 Zoltán Böszörményi <zboszor@mail.externet.hu> - Also check for size >= 256K if we are to set up ARR7, - mtrr_add() returns the value it gets from set_mtrr() - 19990218 Zoltán Böszörményi <zboszor@mail.externet.hu> - Remove Cyrix "coma bug" workaround from here. - Moved to linux/arch/i386/kernel/setup.c and - linux/include/asm-i386/bugs.h - 19990228 Richard Gooch <rgooch@atnf.csiro.au> - Added MTRRIOC_KILL_ENTRY ioctl(2) - Trap for counter underflow in <mtrr_file_del>. - Trap for 4 MiB aligned regions for PPro, stepping <= 7. - 19990301 Richard Gooch <rgooch@atnf.csiro.au> - Created <get_free_region> hook. 
- 19990305 Richard Gooch <rgooch@atnf.csiro.au> - Temporarily disable AMD support now MTRR capability flag is set. - v1.32 - 19990308 Zoltán Böszörményi <zboszor@mail.externet.hu> - Adjust my changes (19990212-19990218) to Richard Gooch's - latest changes. (19990228-19990305) - v1.33 - 19990309 Richard Gooch <rgooch@atnf.csiro.au> - Fixed typo in <printk> message. - 19990310 Richard Gooch <rgooch@atnf.csiro.au> - Support K6-II/III based on Alan Cox's <alan@redhat.com> patches. - v1.34 - 19990511 Bart Hartgers <bart@etpmod.phys.tue.nl> - Support Centaur C6 MCR's. - 19990512 Richard Gooch <rgooch@atnf.csiro.au> - Minor cleanups. - v1.35 - 19990707 Zoltán Böszörményi <zboszor@mail.externet.hu> - Check whether ARR3 is protected in cyrix_get_free_region() - and mtrr_del(). The code won't attempt to delete or change it - from now on if the BIOS protected ARR3. It silently skips ARR3 - in cyrix_get_free_region() or returns with an error code from - mtrr_del(). - 19990711 Zoltán Böszörményi <zboszor@mail.externet.hu> - Reset some bits in the CCRs in cyrix_arr_init() to disable SMM - if ARR3 isn't protected. This is needed because if SMM is active - and ARR3 isn't protected then deleting and setting ARR3 again - may lock up the processor. With SMM entirely disabled, it does - not happen. - 19990812 Zoltán Böszörményi <zboszor@mail.externet.hu> - Rearrange switch() statements so the driver accomodates to - the fact that the AMD Athlon handles its MTRRs the same way - as Intel does. - 19990814 Zoltán Böszörményi <zboszor@mail.externet.hu> - Double check for Intel in mtrr_add()'s big switch() because - that revision check is only valid for Intel CPUs. - 19990819 Alan Cox <alan@redhat.com> - Tested Zoltan's changes on a pre production Athlon - 100% - success. - 19991008 Manfred Spraul <manfreds@colorfullife.com> - replaced spin_lock_reschedule() with a normal semaphore. - v1.36 - 20000221 Richard Gooch <rgooch@atnf.csiro.au> - Compile fix if procfs and devfs not enabled. - Formatting changes. - v1.37 - 20001109 H. Peter Anvin <hpa@zytor.com> - Use the new centralized CPU feature detects. - - v1.38 - 20010309 Dave Jones <davej@suse.de> - Add support for Cyrix III. - - v1.39 - 20010312 Dave Jones <davej@suse.de> - Ugh, I broke AMD support. - Reworked fix by Troels Walsted Hansen <troels@thule.no> - - v1.40 - 20010327 Dave Jones <davej@suse.de> - Adapted Cyrix III support to include VIA C3. - - v2.0 - 20020306 Patrick Mochel <mochel@osdl.org> - Split mtrr.c -> mtrr/*.c - Converted to Linux Kernel Coding Style - Fixed several minor nits in form - Moved some SMP-only functions out, so they can be used - for power management in the future. - TODO: Fix user interface cruft. 
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c index cf39e20..5ac051b 100644 --- a/arch/i386/kernel/cpu/mtrr/if.c +++ b/arch/i386/kernel/cpu/mtrr/if.c @@ -1,5 +1,6 @@ #include <linux/init.h> #include <linux/proc_fs.h> +#include <linux/capability.h> #include <linux/ctype.h> #include <linux/module.h> #include <linux/seq_file.h> diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index 1e9db19..3b4618b 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -44,12 +44,10 @@ #include <asm/msr.h> #include "mtrr.h" -#define MTRR_VERSION "2.0 (20020519)" - u32 num_var_ranges = 0; unsigned int *usage_table; -static DECLARE_MUTEX(main_lock); +static DECLARE_MUTEX(mtrr_sem); u32 size_or_mask, size_and_mask; @@ -335,7 +333,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); /* Search for existing MTRR */ - down(&main_lock); + down(&mtrr_sem); for (i = 0; i < num_var_ranges; ++i) { mtrr_if->get(i, &lbase, &lsize, <ype); if (base >= lbase + lsize) @@ -373,7 +371,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, printk(KERN_INFO "mtrr: no more MTRRs available\n"); error = i; out: - up(&main_lock); + up(&mtrr_sem); unlock_cpu_hotplug(); return error; } @@ -466,7 +464,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); - down(&main_lock); + down(&mtrr_sem); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { @@ -505,7 +503,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) set_mtrr(reg, 0, 0, 0); error = reg; out: - up(&main_lock); + up(&mtrr_sem); unlock_cpu_hotplug(); return error; } @@ -671,7 +669,6 @@ void __init mtrr_bp_init(void) break; } } - printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION); if (mtrr_if) { set_num_var_ranges(); @@ -688,7 +685,7 @@ void mtrr_ap_init(void) if (!mtrr_if || !use_intel()) return; /* - * Ideally we should hold main_lock here to avoid mtrr entries changed, + * Ideally we should hold mtrr_sem here to avoid mtrr entries changed, * but this routine will be called in cpu boot time, holding the lock * breaks it. 
This routine is called in two cases: 1.very earily time * of software resume, when there absolutely isn't mtrr entry changes; diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c index 30898a2..ad87fa5 100644 --- a/arch/i386/kernel/cpu/nexgen.c +++ b/arch/i386/kernel/cpu/nexgen.c @@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void) } //early_arch_initcall(nexgen_init_cpu); + +static int __init nexgen_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_NEXGEN] = NULL; + return 0; +} + +late_initcall(nexgen_exit_cpu); diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 6d91b27..89a85af 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c @@ -29,7 +29,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, - NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", + NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow", /* Transmeta-defined */ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, @@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) /* Other (Linux-defined) */ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -57,11 +57,21 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* AMD-defined (#2) */ - "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL, + "lahf_lm", "cmp_legacy", "svm", NULL, "cr8legacy", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; + static char *x86_power_flags[] = { + "ts", /* temperature sensor */ + "fid", /* frequency id control */ + "vid", /* voltage id control */ + "ttp", /* thermal trip */ + "tm", + "stc", + NULL, + /* nothing */ /* constant_tsc - moved to flags */ + }; struct cpuinfo_x86 *c = v; int i, n = c - cpu_data; int fpu_exception; @@ -131,6 +141,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) x86_cap_flags[i] != NULL ) seq_printf(m, " %s", x86_cap_flags[i]); + for (i = 0; i < 32; i++) + if (c->x86_power & (1 << i)) { + if (i < ARRAY_SIZE(x86_power_flags) && + x86_power_flags[i]) + seq_printf(m, "%s%s", + x86_power_flags[i][0]?" 
":"", + x86_power_flags[i]); + else + seq_printf(m, " [%d]", i); + } + seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n", c->loops_per_jiffy/(500000/HZ), (c->loops_per_jiffy/(5000/HZ)) % 100); diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c index 8602425..d08d5a2 100644 --- a/arch/i386/kernel/cpu/rise.c +++ b/arch/i386/kernel/cpu/rise.c @@ -51,3 +51,11 @@ int __init rise_init_cpu(void) } //early_arch_initcall(rise_init_cpu); + +static int __init rise_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_RISE] = NULL; + return 0; +} + +late_initcall(rise_exit_cpu); diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c index fc42638..7214c9b 100644 --- a/arch/i386/kernel/cpu/transmeta.c +++ b/arch/i386/kernel/cpu/transmeta.c @@ -1,4 +1,5 @@ #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/init.h> #include <asm/processor.h> #include <asm/msr.h> @@ -84,7 +85,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c) #endif } -static void transmeta_identify(struct cpuinfo_x86 * c) +static void __init transmeta_identify(struct cpuinfo_x86 * c) { u32 xlvl; generic_identify(c); @@ -111,3 +112,11 @@ int __init transmeta_init_cpu(void) } //early_arch_initcall(transmeta_init_cpu); + +static int __init transmeta_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_TRANSMETA] = NULL; + return 0; +} + +late_initcall(transmeta_exit_cpu); diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c index 264fcad..2cd988f 100644 --- a/arch/i386/kernel/cpu/umc.c +++ b/arch/i386/kernel/cpu/umc.c @@ -31,3 +31,11 @@ int __init umc_init_cpu(void) } //early_arch_initcall(umc_init_cpu); + +static int __init umc_exit_cpu(void) +{ + cpu_devs[X86_VENDOR_UMC] = NULL; + return 0; +} + +late_initcall(umc_exit_cpu); |