static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
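+/*
+ * AMD TOM2: top of the memory range above 4GB.  Non-zero only when
+ * the BIOS forces that whole range to write-back.
+ */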
+static u64 tom2;
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."
}
}
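+ /* AMD TOM2: everything between 4GB and TOM2 is forced write-back */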
+ if (tom2) {
+ if (start >= (1ULL<<32) && (end < tom2))
+ return MTRR_TYPE_WRBACK;
+ }
+
if (prev_match != 0xFF)
return prev_match;
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
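+ /*
+  * Record AMD TOM2 when the BIOS forces memory above 4GB to
+  * write-back, so mtrr_type_lookup() can honor it.
+  */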
+ if (amd_special_default_mtrr()) {
+ unsigned low, high;
+ /* TOP_MEM2 */
+ rdmsr(MSR_K8_TOP_MEM2, low, high);
+ tom2 = high;
+ tom2 <<= 32;
+ tom2 |= low;
+ tom2 &= 0xffffff800000ULL; /* TOM2 is 8MB aligned; clear reserved low bits */
+ }
if (mtrr_show) {
int high_width;
else
printk(KERN_INFO "MTRR %u disabled\n", i);
}
+ if (tom2)
+ printk(KERN_INFO "TOM2: %016llx aka %lldM\n", tom2, tom2>>20);
}
mtrr_state_set = 1;
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)
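+/*
+ * Returns 1 when TOM2 is enabled and the BIOS forces the range
+ * between 4GB and TOM2 to write-back (Tom2ForceMemTypeWB).
+ */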
-static __init int amd_special_default_mtrr(void)
+int __init amd_special_default_mtrr(void)
{
u32 l, h;
return 1;
}
}
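+ /*
+  * PAT is implemented on AMD family 0xf (K8) through family 0x11
+  * parts; trust the CPUID PAT flag there.
+  */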
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) {
+ if (cpu_has_pat)
+ return 1;
+ }
pat_wc_enabled = 0;
printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
+extern int amd_special_default_mtrr(void);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end)
{