patch-2.4.22 linux-2.4.22/include/asm-ia64/system.h

diff -urN linux-2.4.21/include/asm-ia64/system.h linux-2.4.22/include/asm-ia64/system.h
@@ -213,172 +213,9 @@
 # define restore_flags(flags)	__restore_flags(flags)
 #endif /* !CONFIG_SMP */
 
-/*
- * Force an unresolved reference if someone tries to use
- * ia64_fetch_and_add() with a bad value.
- */
-extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
-extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
-
-#define IA64_FETCHADD(tmp,v,n,sz)						\
-({										\
-	switch (sz) {								\
-	      case 4:								\
-		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
-				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
-		break;								\
-										\
-	      case 8:								\
-		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
-				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
-		break;								\
-										\
-	      default:								\
-		__bad_size_for_ia64_fetch_and_add();				\
-	}									\
-})
-
-#define ia64_fetch_and_add(i,v)							\
-({										\
-	__u64 _tmp;								\
-	volatile __typeof__(*(v)) *_v = (v);					\
-	switch (i) {								\
-	      case -16:	IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break;	\
-	      case  -8:	IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break;	\
-	      case  -4:	IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break;	\
-	      case  -1:	IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break;	\
-	      case   1:	IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break;	\
-	      case   4:	IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break;	\
-	      case   8:	IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break;	\
-	      case  16:	IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break;	\
-	      default:								\
-		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
-		break;								\
-	}									\
-	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
-})
-
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer (void);
-
-static __inline__ unsigned long
-__xchg (unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long result;
-
-	switch (size) {
-	      case 1:
-		__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 2:
-		__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 4:
-		__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 8:
-		__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-#define xchg(ptr,x)							     \
-  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long __cmpxchg_called_with_bad_pointer(void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size)						\
-({											\
-	__typeof__(ptr) _p_ = (ptr);							\
-	__typeof__(new) _n_ = (new);							\
-	__u64 _o_, _r_;									\
-											\
-	switch (size) {									\
-	      case 1: _o_ = (__u8 ) (long) (old); break;				\
-	      case 2: _o_ = (__u16) (long) (old); break;				\
-	      case 4: _o_ = (__u32) (long) (old); break;				\
-	      case 8: _o_ = (__u64) (long) (old); break;				\
-	      default: break;								\
-	}										\
-	 __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));				\
-	switch (size) {									\
-	      case 1:									\
-		__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 2:									\
-		__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 4:									\
-		__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 8:									\
-		__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      default:									\
-		_r_ = __cmpxchg_called_with_bad_pointer();				\
-		break;									\
-	}										\
-	(__typeof__(old)) _r_;								\
-})
-
-#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v)							\
-  do {										\
-	if (_cmpxchg_bugcheck_count-- <= 0) {					\
-		void *ip;							\
-		extern int printk(const char *fmt, ...);			\
-		asm ("mov %0=ip" : "=r"(ip));					\
-		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
-		break;								\
-	}									\
-  } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
 #ifdef __KERNEL__
 
-#define prepare_to_switch()    do { } while(0)
+#define prepare_to_switch()    local_irq_disable()
 
 #ifdef CONFIG_IA32_SUPPORT
 # define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
@@ -407,14 +244,17 @@
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
 
-#define __switch_to(prev,next,last) do {						\
-	if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
-	    || IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE())	\
-		ia64_save_extra(prev);							\
-	if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
-	    || IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE())	\
-		ia64_load_extra(next);							\
-	(last) = ia64_switch_to((next));						\
+#define IA64_HAS_EXTRA_STATE(t)							\
+	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
+	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {							 \
+	if (IA64_HAS_EXTRA_STATE(prev))								 \
+		ia64_save_extra(prev);								 \
+	if (IA64_HAS_EXTRA_STATE(next))								 \
+		ia64_load_extra(next);								 \
+	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
+	(last) = ia64_switch_to((next));							 \
 } while (0)
 
 #ifdef CONFIG_SMP
@@ -423,37 +263,25 @@
 #define arch_consoles_callable() (cpu_online_map & (1UL << smp_processor_id()))
 
 /*
- * In the SMP case, we save the fph state when context-switching
- * away from a thread that modified fph.  This way, when the thread
- * gets scheduled on another CPU, the CPU can pick up the state from
- * task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU.
- */
-# define switch_to(prev,next,last) do {					\
-	if (ia64_psr(ia64_task_regs(prev))->mfh) {			\
-		ia64_psr(ia64_task_regs(prev))->mfh = 0;		\
-		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;		\
-		__ia64_save_fpu((prev)->thread.fph);			\
-		(prev)->thread.last_fph_cpu = smp_processor_id();	\
-	}								\
-	if ((next)->thread.flags & IA64_THREAD_FPH_VALID) {		\
-		if (((next)->thread.last_fph_cpu == smp_processor_id()) \
-		    && (ia64_get_fpu_owner() == next)) {		\
-			ia64_psr(ia64_task_regs(next))->dfh = 0;	\
-			ia64_psr(ia64_task_regs(next))->mfh = 0;	\
-		} else {						\
-			ia64_psr(ia64_task_regs(next))->dfh = 1;	\
-		}							\
-	}								\
-	__switch_to(prev,next,last);					\
-  } while (0)
-#else
-# define switch_to(prev,next,last) do {					\
-	ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next));	\
-	__switch_to(prev,next,last);					\
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.  In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do {						\
+	if (ia64_psr(ia64_task_regs(prev))->mfh) {				\
+		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
+		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
+		__ia64_save_fpu((prev)->thread.fph);				\
+	}									\
+	__switch_to(prev, next, last);						\
 } while (0)
+#else
+# define switch_to(prev,next,last)	__switch_to(prev, next, last)
 #endif
 
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
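
The comment block removed above spells out the cmpxchg() contract: OLD is compared with the word at MEM, NEW is stored only if the two match, and the value originally found in memory is returned, so success is detected by comparing the return value with OLD. As a minimal illustration of the retry-loop pattern that contract supports (this is a sketch, not kernel code; the names atomic_add_via_cmpxchg, word and delta are made up here):

        /* Sketch only: atomically add `delta' to `*word' using the cmpxchg()
         * contract described in the removed comment block.  cmpxchg() returns
         * the value it found in memory, so the store took effect only when
         * that value equals the `old' we read before the attempt. */
        static inline unsigned long
        atomic_add_via_cmpxchg (volatile unsigned long *word, unsigned long delta)
        {
                unsigned long old, new;

                do {
                        old = *word;
                        new = old + delta;
                } while (cmpxchg(word, old, new) != old);

                return new;     /* the value we installed */
        }

Note that the compatibility define removed above makes plain cmpxchg() the acquire form (cmpxchg_acq), so the sketch inherits acquire ordering.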

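Likewise for the other primitives this patch drops from system.h: ia64_fetch_and_add() yields the new value of the word (the removed macro ends with _tmp + (i)) and only accepts the increments the fetchadd instruction can encode (-16, -8, -4, -1, 1, 4, 8, 16), while xchg() stores a new value and hands back the previous contents. A hedged usage sketch, with ticket, lock_word and example_usage as illustrative names only:

        /* Sketch only, following the macros removed above. */
        static volatile __u64 ticket;
        static unsigned long lock_word;

        static void
        example_usage (void)
        {
                __u64 mine;
                unsigned long prev;

                /* fetchadd8 with an immediate of 1; the macro returns the
                 * *new* value of the word. */
                mine = ia64_fetch_and_add(1, &ticket);

                /* xchg8 on an 8-byte word; returns the *previous* contents. */
                prev = xchg(&lock_word, 1UL);

                (void) mine;
                (void) prev;
        }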