Commit fd5cea02 authored by Cyrill Gorcunov, committed by Thomas Gleixner

x86: nmi_32/64.c - add helper functions to hide arch specific data


Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: hpa@zytor.com
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 7c2ba83f
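For readers skimming the diff, the refactoring pattern in miniature: each architecture file keeps its own static inline accessors with a common signature, and the shared watchdog code calls only those helpers, so the per-arch data layout (nmi_count() on 32-bit vs. cpu_pda()/read_pda() on 64-bit) no longer leaks into common code. The standalone C sketch below is illustrative only, not kernel source; all names in it (FAKE_ARCH_32, fake_nmi_count, fake_pda, get_sample_count) are invented for the example.

/*
 * Illustrative sketch only -- not kernel code.  A tiny userspace
 * program showing the shape of the refactoring: each "arch" backend
 * provides the same static inline accessor, and the common code at
 * the bottom never touches the backend data layout directly.
 */
#include <stdio.h>

#ifdef FAKE_ARCH_32
/* 32-bit flavour: a plain per-cpu array, analogous to nmi_count(cpu) */
static unsigned int fake_nmi_count[4] = { 3, 5, 8, 13 };

static inline unsigned int get_sample_count(int cpu)
{
        return fake_nmi_count[cpu];
}
#else
/* 64-bit flavour: the count lives inside a per-cpu descriptor,
 * analogous to cpu_pda(cpu)->__nmi_count */
struct fake_pda { unsigned int nmi_count; };
static struct fake_pda fake_pda[4] = { { 3 }, { 5 }, { 8 }, { 13 } };

static inline unsigned int get_sample_count(int cpu)
{
        return fake_pda[cpu].nmi_count;
}
#endif

/* "Common" code: identical for both flavours, no arch details visible */
int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu%d: count=%u\n", cpu, get_sample_count(cpu));
        return 0;
}

Either gcc -DFAKE_ARCH_32 sketch.c or plain gcc sketch.c builds the same main(); in the patch itself, check_nmi_watchdog() and nmi_watchdog_tick() play the role of that common caller.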
nmi_32.c

@@ -51,6 +51,26 @@ static DEFINE_PER_CPU(short, wd_enabled);
 
 static int endflag __initdata = 0;
 
+static inline unsigned int get_nmi_count(int cpu)
+{
+        return nmi_count(cpu);
+}
+
+static inline int mce_in_progress(void)
+{
+        return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+        return per_cpu(irq_stat, cpu).apic_timer_irqs +
+                per_cpu(irq_stat, cpu).irq0_irqs;
+}
+
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
 {
@@ -104,19 +124,19 @@ int __init check_nmi_watchdog(void)
 #endif
 
         for_each_possible_cpu(cpu)
-                prev_nmi_count[cpu] = nmi_count(cpu);
+                prev_nmi_count[cpu] = get_nmi_count(cpu);
         local_irq_enable();
         mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
         for_each_online_cpu(cpu) {
                 if (!per_cpu(wd_enabled, cpu))
                         continue;
-                if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
+                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
                         printk(KERN_WARNING "WARNING: CPU#%d: NMI "
                                 "appears to be stuck (%d->%d)!\n",
                                 cpu,
                                 prev_nmi_count[cpu],
-                                nmi_count(cpu));
+                                get_nmi_count(cpu));
                         per_cpu(wd_enabled, cpu) = 0;
                         atomic_dec(&nmi_active);
                 }
@@ -355,6 +375,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
         }
 
+        sum = get_timer_irqs(cpu);
+
+        if (__get_cpu_var(nmi_touch)) {
+                __get_cpu_var(nmi_touch) = 0;
+                touched = 1;
+        }
+
         if (cpu_isset(cpu, backtrace_mask)) {
                 static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
@@ -365,16 +392,9 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 cpu_clear(cpu, backtrace_mask);
         }
 
-        /*
-         * Take the local apic timer and PIT/HPET into account. We don't
-         * know which one is active, when we have highres/dyntick on
-         */
-        sum = per_cpu(irq_stat, cpu).apic_timer_irqs +
-                per_cpu(irq_stat, cpu).irq0_irqs;
-        if (__get_cpu_var(nmi_touch)) {
-                __get_cpu_var(nmi_touch) = 0;
-                touched = 1;
-        }
+        /* Could check oops_in_progress here too, but it's safer not to */
+        if (mce_in_progress())
+                touched = 1;
 
         /* if the none of the timers isn't firing, this cpu isn't doing much */
         if (!touched && __get_cpu_var(last_irq_sum) == sum) {
...
nmi_64.c

@@ -47,6 +47,30 @@ static unsigned int nmi_hz = HZ;
 
 static DEFINE_PER_CPU(short, wd_enabled);
 
+static int endflag __initdata = 0;
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+        return cpu_pda(cpu)->__nmi_count;
+}
+
+static inline int mce_in_progress(void)
+{
+#ifdef CONFIG_X86_MCE
+        return atomic_read(&mce_entry) > 0;
+#endif
+        return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+        return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+}
+
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
 {
@@ -55,8 +79,6 @@ void nmi_watchdog_default(void)
                 nmi_watchdog = NMI_NONE;
 }
 
-static int endflag __initdata = 0;
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -99,19 +121,19 @@ int __init check_nmi_watchdog(void)
 #endif
 
         for_each_possible_cpu(cpu)
-                prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
+                prev_nmi_count[cpu] = get_nmi_count(cpu);
         local_irq_enable();
         mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
         for_each_online_cpu(cpu) {
                 if (!per_cpu(wd_enabled, cpu))
                         continue;
-                if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
+                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
                         printk(KERN_WARNING "WARNING: CPU#%d: NMI "
                                 "appears to be stuck (%d->%d)!\n",
                                 cpu,
                                 prev_nmi_count[cpu],
-                                cpu_pda(cpu)->__nmi_count);
+                                get_nmi_count(cpu));
                         per_cpu(wd_enabled, cpu) = 0;
                         atomic_dec(&nmi_active);
                 }
@@ -327,7 +349,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
         }
 
-        sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+        sum = get_timer_irqs(cpu);
+
         if (__get_cpu_var(nmi_touch)) {
                 __get_cpu_var(nmi_touch) = 0;
                 touched = 1;
@@ -343,12 +366,9 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 cpu_clear(cpu, backtrace_mask);
         }
 
-#ifdef CONFIG_X86_MCE
-        /* Could check oops_in_progress here too, but it's safer
-           not too */
-        if (atomic_read(&mce_entry) > 0)
-                touched = 1;
-#endif
+        if (mce_in_progress())
+                touched = 1;
+
         /* if the apic timer isn't firing, this cpu isn't doing much */
         if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                 /*
...
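A side note on the mce_in_progress() split, again as a hedged standalone illustration rather than kernel source: the 32-bit helper is a constant return 0 and the 64-bit helper keeps the #ifdef CONFIG_X86_MCE inside its body, so the call site if (mce_in_progress()) needs no preprocessor guard, and when the option is off the compiler can drop the branch entirely. The toy program below mimics that idiom with an invented CONFIG_FAKE_MCE macro and counter.

#include <stdio.h>

#ifdef CONFIG_FAKE_MCE
static int fake_mce_entry = 1;          /* pretend an MCE is being handled */

static inline int mce_in_progress(void)
{
        return fake_mce_entry > 0;
}
#else
static inline int mce_in_progress(void)
{
        return 0;                       /* constant: the branch below folds away */
}
#endif

int main(void)
{
        int touched = 0;

        if (mce_in_progress())          /* no #ifdef needed at the call site */
                touched = 1;

        printf("touched=%d\n", touched);
        return 0;
}

Compile with gcc -DCONFIG_FAKE_MCE toy.c or plain gcc toy.c; only the helper changes, never the caller, which mirrors what the patch does to nmi_watchdog_tick().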