mn10300: add cc clobbers to asm statements

gcc 4.2.1 for MN10300 is more aggressive than the older gcc in
reordering/moving other insns between an insn that sets flags and an insn
that uses those flags.  This leads to trouble with asm statements which
are missing an explicit "cc" clobber.  This patch adds the explicit "cc"
clobber to asm statements which do indeed clobber the condition flags.
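
For illustration only (this function is not part of the patch; the name and
surrounding logic are made up), the kind of sequence that can go wrong when
the clobber is missing: gcc may schedule the comparison for the return
expression before the asm and the flag-consuming conditional after it, while
"bsch" rewrites the EPSW condition flags in between.

	/* Hypothetical example, not from the kernel sources. */
	static inline int first_bit_or_negated(unsigned long x, int a, int b)
	{
		int bit;

		/* "bsch" sets the condition flags, so say so; without the
		 * "cc" clobber gcc is free to keep the flags from "a < b"
		 * live across this statement. */
		asm("bsch %2,%0"
		    : "=r"(bit)
		    : "0"(0), "r"(x & -x)
		    : "cc");

		return a < b ? bit : -bit;
	}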

Signed-off-by: Mark Salter <msalter@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d6bb7a1ad3
parent b0641e86fb
Author: Mark Salter <msalter@redhat.com>
Date:   2010-01-08 14:43:16 -08:00, committed by Linus Torvalds
10 changed files with 18 additions and 12 deletions

@@ -165,7 +165,7 @@ static inline __attribute__((const))
 unsigned long __ffs(unsigned long x)
 {
 	int bit;
-	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x));
+	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc");
 	return bit;
 }
@@ -177,7 +177,7 @@ static inline __attribute__((const))
 int __ilog2_u32(u32 n)
 {
 	int bit;
-	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n));
+	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc");
 	return bit;
 }

@@ -72,6 +72,7 @@ unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
 	     * MDR = MDR:val%div */
 	    : "=r"(result)
 	    : "0"(val), "ir"(mult), "r"(div)
+	    : "cc"
 	    );
 	return result;
@@ -92,6 +93,7 @@ signed __muldiv64s(signed val, signed mult, signed div)
 	     * MDR = MDR:val%div */
 	    : "=r"(result)
 	    : "0"(val), "ir"(mult), "r"(div)
+	    : "cc"
 	    );
 	return result;

@@ -143,6 +143,7 @@ do { \
 		" mov %0,epsw \n" \
 		: "=&d"(tmp) \
 		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \
+		: "cc" \
 		); \
 } while (0)

@@ -22,7 +22,7 @@ do { \
 		" mov %0,%1 \n" \
 		: "=d"(w) \
 		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
-		: "memory" \
+		: "cc", "memory" \
 		); \
 } while (0)

@@ -316,7 +316,7 @@ do { \
 		" .previous\n" \
 		: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
 		: "0"(__from), "1"(__to), "2"(size) \
-		: "memory"); \
+		: "cc", "memory"); \
 	} \
 } while (0)
@@ -352,7 +352,7 @@ do { \
 		" .previous\n" \
 		: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
 		: "0"(__from), "1"(__to), "2"(size) \
-		: "memory"); \
+		: "cc", "memory"); \
 	} \
 } while (0)

@@ -380,7 +380,8 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask)
 	u32 epsw;
 	asm volatile(" bclr %1,(%2) \n"
 		     " mov epsw,%0 \n"
-		     : "=d"(epsw) : "d"(mask), "a"(ptr));
+		     : "=d"(epsw) : "d"(mask), "a"(ptr)
+		     : "cc", "memory");
 	return !(epsw & EPSW_FLAG_Z);
 }

@@ -22,6 +22,7 @@ static inline unsigned short from32to16(__wsum sum)
 	    " addc 0xffff,%0 \n"
 	    : "=r" (sum)
 	    : "r" (sum << 16), "0" (sum & 0xffff0000)
+	    : "cc"
 	    );
 	return sum >> 16;
 }

@@ -28,7 +28,8 @@ void __delay(unsigned long loops)
 		"2: add -1,%0 \n"
 		" bne 2b \n"
 		: "=&d" (d0)
-		: "0" (loops));
+		: "0" (loops)
+		: "cc");
 }
 EXPORT_SYMBOL(__delay);

@@ -62,7 +62,7 @@ do { \
 		" .previous" \
 		:"=&r"(res), "=r"(count), "=&r"(w) \
 		:"i"(-EFAULT), "1"(count), "a"(src), "a"(dst) \
-		:"memory"); \
+		: "memory", "cc"); \
 } while (0)
 long
@@ -109,7 +109,7 @@ do { \
 		".previous\n" \
 		: "+r"(size), "=&r"(w) \
 		: "a"(addr), "d"(0) \
-		: "memory"); \
+		: "memory", "cc"); \
 } while (0)
 unsigned long
@@ -161,6 +161,6 @@ long strnlen_user(const char *s, long n)
 		".previous\n"
 		:"=d"(res), "=&r"(w)
 		:"0"(0), "a"(s), "r"(n)
-		:"memory");
+		: "memory", "cc");
 	return res;
 }

@@ -633,13 +633,13 @@ static int misalignment_addr(unsigned long *registers, unsigned long sp,
 		goto displace_or_inc;
 	case SD24:
 		tmp = disp << 8;
-		asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
+		asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
 		disp = (long) tmp;
 		goto displace_or_inc;
 	case SIMM4_2:
 		tmp = opcode >> 4 & 0x0f;
 		tmp <<= 28;
-		asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
+		asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc");
 		disp = (long) tmp;
 		goto displace_or_inc;
 	case IMM8: