patch-2.3.43 linux/include/asm-i386/mmu_context.h
- Lines: 60
- Date: Wed Feb 9 21:33:07 2000
- Orig file: v2.3.42/linux/include/asm-i386/mmu_context.h
- Orig date: Tue Dec 7 09:32:49 1999
diff -u --recursive --new-file v2.3.42/linux/include/asm-i386/mmu_context.h linux/include/asm-i386/mmu_context.h
@@ -1,6 +1,7 @@
 #ifndef __I386_MMU_CONTEXT_H
 #define __I386_MMU_CONTEXT_H
 
+#include <linux/config.h>
 #include <asm/desc.h>
 #include <asm/atomic.h>
 #include <asm/pgalloc.h>
@@ -12,30 +13,46 @@
 #define init_new_context(tsk,mm)	do { } while (0)
 
 #ifdef __SMP__
-extern unsigned int cpu_tlbbad[NR_CPUS];
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+	if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
 #endif
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
 {
+	set_bit(cpu, &next->cpu_vm_mask);
 	if (prev != next) {
 		/*
 		 * Re-load LDT if necessary
 		 */
 		if (prev->segments != next->segments)
 			load_LDT(next);
-
+#ifdef CONFIG_SMP
+		cpu_tlbstate[cpu].state = TLBSTATE_OK;
+		cpu_tlbstate[cpu].active_mm = next;
+#endif
 		/* Re-load page tables */
 		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
 		clear_bit(cpu, &prev->cpu_vm_mask);
 	}
 #ifdef __SMP__
 	else {
-		if(cpu_tlbbad[cpu])
+		int old_state = cpu_tlbstate[cpu].state;
+		cpu_tlbstate[cpu].state = TLBSTATE_OK;
+		if(cpu_tlbstate[cpu].active_mm != next)
+			BUG();
+		if(old_state == TLBSTATE_OLD)
 			local_flush_tlb();
 	}
-	cpu_tlbbad[cpu] = 0;
+
 #endif
-	set_bit(cpu, &next->cpu_vm_mask);
 }
 
 #define activate_mm(prev, next) \
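
The hunk above replaces the old per-CPU cpu_tlbbad[] flag with a small lazy-TLB state machine: enter_lazy_tlb() marks a CPU as lazy when the scheduler switches to a kernel thread that keeps borrowing the previous mm, and switch_mm() then only reloads %cr3 (or does a local_flush_tlb()) when the recorded state says the cached translations may be stale. The cpu_tlbstate[] array and the TLBSTATE_* constants are not declared in this file; they come from the companion asm-i386 changes in the same patch (presumably the <asm/pgalloc.h> side, which this header includes). A minimal sketch of the shape of those declarations, with illustrative values rather than the literal patch text:

	/* Per-CPU lazy-TLB bookkeeping that switch_mm() above relies on.
	 * Names match the uses in the diff; the numeric values and the
	 * header they live in are assumptions for illustration only. */
	#define TLBSTATE_OK	1	/* this CPU's TLB matches active_mm      */
	#define TLBSTATE_LAZY	2	/* kernel thread is borrowing active_mm  */
	#define TLBSTATE_OLD	3	/* TLB went stale while lazy; must flush */

	struct tlb_state {
		struct mm_struct *active_mm;	/* mm whose page tables are loaded */
		int state;
	};
	extern struct tlb_state cpu_tlbstate[NR_CPUS];

Presumably the SMP flush-IPI path in the companion arch/i386 changes flips a lazy CPU's entry to TLBSTATE_OLD instead of flushing right away; switch_mm() then performs the deferred local_flush_tlb() when it sees that state, as in the prev == next branch above.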