pkgsrc/lang/gcc/patches/patch-ay
jmc 2ce70215a8 Update patch set against a sync of source from gnusrc/dist/toolchain.
Not very many overall changes. Main ones include

1. Support for powerpc, arm32 and vax
2. Makefile.gcc can now be included by anything which depends on gcc versions.
   If the version installed isn't 2.95.3 it'll add itself as a BUILD_DEPENDS.
   (XXX: any of the makefiles in pkgsrc should be checked and changed to use
    this)

3. Remove special PLIST.NetBSD-sparc as it's no longer needed
4. Change post-extract loop to pick up any arch files from FILESDIR without
   having to hardcode all the archs
5. Remove arch restrictions as this should work on any arch supported by the
   main source tree as of 03/28/02
6. Add PKGREVISION as this clearly isn't stock 2.95.3 (it doesn't change
   gcc --version so version checks won't care).
2002-03-28 10:11:50 +00:00

1396 lines
45 KiB
Text
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

$NetBSD: patch-ay,v 1.3 2002/03/28 10:11:53 jmc Exp $
--- ../gcc-2.95.3/gcc/config/sparc/sparc.md.orig 2001/04/23 12:00:43 1.1.1.2
+++ ../gcc-2.95.3/gcc/config/sparc/sparc.md 2002/03/28 08:14:56 1.5
@@ -837,7 +837,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -890,7 +890,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -911,7 +911,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -932,7 +932,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -953,7 +953,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -974,7 +974,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@@ -1608,7 +1608,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1632,7 +1632,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1656,7 +1656,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1690,7 +1690,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1724,7 +1724,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1758,7 +1758,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@@ -1774,6 +1774,145 @@
"
{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
}")
+
+;;(define_expand "bunordered"
+;; [(set (pc)
+;; (if_then_else (unordered (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1,
+;; UNORDERED);
+;; emit_jump_insn (gen_beq (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNORDERED, sparc_compare_op0,
+;; sparc_compare_op1);
+;;}")
+
+;;(define_expand "bordered"
+;; [(set (pc)
+;; (if_then_else (ordered (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, ORDERED);
+;; emit_jump_insn (gen_bne (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (ORDERED, sparc_compare_op0,
+;; sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "bungt"
+;; [(set (pc)
+;; (if_then_else (ungt (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGT);
+;; emit_jump_insn (gen_bgt (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNGT, sparc_compare_op0, sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "bunlt"
+;; [(set (pc)
+;; (if_then_else (unlt (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLT);
+;; emit_jump_insn (gen_bne (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNLT, sparc_compare_op0, sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "buneq"
+;; [(set (pc)
+;; (if_then_else (uneq (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNEQ);
+;; emit_jump_insn (gen_beq (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNEQ, sparc_compare_op0, sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "bunge"
+;; [(set (pc)
+;; (if_then_else (unge (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGE);
+;; emit_jump_insn (gen_bne (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNGE, sparc_compare_op0, sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "bunle"
+;; [(set (pc)
+;; (if_then_else (unle (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLE);
+;; emit_jump_insn (gen_bne (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (UNLE, sparc_compare_op0, sparc_compare_op1);
+;;}")
+;;
+;;(define_expand "bltgt"
+;; [(set (pc)
+;; (if_then_else (ltgt (match_dup 1) (const_int 0))
+;; (label_ref (match_operand 0 "" ""))
+;; (pc)))]
+;; ""
+;; "
+;;{
+;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+;; {
+;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LTGT);
+;; emit_jump_insn (gen_bne (operands[0]));
+;; DONE;
+;; }
+;; operands[1] = gen_compare_reg (LTGT, sparc_compare_op0, sparc_compare_op1);
+;;}")
;; Now match both normal and inverted jump.
@@ -2496,7 +2635,7 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(high:DI (match_operand:DI 1 "sp64_medium_pic_operand" "")))]
"(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
- "sethi\\t%%lo(%a1), %0"
+ "sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
(set_attr "length" "1")])
@@ -3473,10 +3612,7 @@
(define_split
[(set (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "register_operand" ""))]
- "reload_completed
- && (! TARGET_ARCH64
- || (TARGET_FPU
- && ! TARGET_HARD_QUAD))"
+ "reload_completed"
[(clobber (const_int 0))]
"
{
@@ -4521,16 +4657,70 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "extendsftf2"
+(define_expand "extendsftf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_stoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*extendsftf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
"fstoq\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "extenddftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_dtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
-(define_insn "extenddftf2"
+(define_insn "*extenddftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
@@ -4547,8 +4737,34 @@
"fdtos\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "trunctfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtos\"),
+ operands[0], 0, SFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
-(define_insn "trunctfsf2"
+(define_insn "*trunctfsf2_hq"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
@@ -4557,7 +4773,33 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "trunctfdf2"
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtod\"),
+ operands[0], 0, DFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*trunctfdf2_hq"
[(set (match_operand:DF 0 "register_operand" "=e")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
@@ -4583,8 +4825,34 @@
"fitod\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "floatsitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_itoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
-(define_insn "floatsitf2"
+(define_insn "*floatsitf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
@@ -4592,6 +4860,29 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
+(define_expand "floatunssitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:SI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uitoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
+
;; Now the same for 64 bit sources.
(define_insn "floatdisf2"
@@ -4610,14 +4901,63 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "floatditf2"
+(define_expand "floatditf2"
[(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_xtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*floatditf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:DI 1 "register_operand" "e")))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fxtoq\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+(define_expand "floatunsditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uxtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
+
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
@@ -4636,8 +4976,33 @@
"fdtoi\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoi\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
-(define_insn "fix_trunctfsi2"
+(define_insn "*fix_trunctfsi2_hq"
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_HARD_QUAD"
@@ -4645,6 +5010,28 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
+(define_expand "fixuns_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (unsigned_fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoui\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
;; Now the same, for V9 targets
(define_insn "fix_truncsfdi2"
@@ -4663,13 +5050,61 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "fix_trunctfdi2"
+(define_expand "fix_trunctfdi2"
[(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtox\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*fix_trunctfdi2_hq"
+ [(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fqtox\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "fixuns_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unsigned_fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoux\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
;;- arithmetic instructions
@@ -5170,9 +5605,8 @@
;; Integer Multiply/Divide.
-;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
-;; we used. We still use them in 32 bit v9 compilers.
-;; The 64 bit v9 compiler will (/should) widen the args and use muldi3.
+;; The 32 bit multiply/divide instructions are deprecated on v9, but at
+;; least in UltraSPARC I, II and IIi it is a win tick-wise.
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -5230,15 +5664,13 @@
}"
[(set_attr "length" "9,8")])
-;; It is not known whether this will match.
-
(define_insn "*cmp_mul_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (mult:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC_NOOV 100)
- (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
+ [(set (reg:CC 100)
+ (compare:CC (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
"TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
"smulcc\\t%1, %2, %0"
[(set_attr "type" "imul")
@@ -5254,12 +5686,11 @@
if (CONSTANT_P (operands[2]))
{
if (TARGET_V8PLUS)
- {
- emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
- operands[2]));
- DONE;
- }
- emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_mulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
DONE;
}
if (TARGET_V8PLUS)
@@ -5309,14 +5740,22 @@
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "*mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
;; XXX
-(define_insn "const_mulsidi3"
+(define_insn "const_mulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "small_int" "I")))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL32"
"*
{
return TARGET_SPARCLET ? \"smuld\\t%1, %2, %L0\" : \"smul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
@@ -5325,13 +5764,21 @@
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "const_mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "small_int" "I")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
(define_expand "smulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
(sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
(const_int 32))))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
@@ -5407,8 +5854,7 @@
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
@@ -5419,8 +5865,7 @@
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r"))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
@@ -5434,12 +5879,11 @@
if (CONSTANT_P (operands[2]))
{
if (TARGET_V8PLUS)
- {
- emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
- operands[2]));
- DONE;
- }
- emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_umulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
DONE;
}
if (TARGET_V8PLUS)
@@ -5475,10 +5919,18 @@
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "*umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
;; XXX
-(define_insn "const_umulsidi3"
+(define_insn "const_umulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" "")))]
@@ -5491,6 +5943,14 @@
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "const_umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" "")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; XXX
(define_insn "const_umulsidi3_v8plus"
[(set (match_operand:DI 0 "register_operand" "=h,r")
@@ -5509,7 +5969,7 @@
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
(zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
(const_int 32))))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
@@ -5568,8 +6028,7 @@
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
@@ -5580,40 +6039,64 @@
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" ""))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
;; The v8 architecture specifies that there must be 3 instructions between
;; a y register write and a use of it for correct results.
+
+(define_expand "divsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "input_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))])]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "
+{
+ if (TARGET_ARCH64)
+ {
+ operands[3] = gen_reg_rtx(SImode);
+ emit_insn (gen_ashrsi3 (operands[3], operands[1], GEN_INT (31)));
+ emit_insn (gen_divsi3_sp64 (operands[0], operands[1], operands[2],
+ operands[3]));
+ DONE;
+ }
+}")
-;; XXX SHEESH
-(define_insn "divsi3"
+(define_insn "divsi3_sp32"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(div:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "input_operand" "rI,m")))
(clobber (match_scratch:SI 3 "=&r,&r"))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS)
+ && TARGET_ARCH32"
"*
{
if (which_alternative == 0)
- if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tsdiv\\t%1, %2, %0\";
- else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %2, %0\";
+ if (TARGET_V9)
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdiv\\t%1, %2, %0\";
+ else
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %2, %0\";
else
if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tld\\t%2, %3\\n\\tsdiv\\t%1, %3, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tsdiv\\t%1, %3, %0\";
else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tld\\t%2, %3\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %3, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %3, %0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
(const_int 4) (const_int 7)))])
+(define_insn "divsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))
+ (use (match_operand:SI 3 "register_operand" "r"))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, %3, %%y\\n\\tsdiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
+
(define_insn "divdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(div:DI (match_operand:DI 1 "register_operand" "r")
@@ -5621,47 +6104,47 @@
"TARGET_ARCH64"
"sdivx\\t%1, %2, %0")
-;; It is not known whether this will match.
-
-;; XXX I hope it doesn't fucking match...
(define_insn "*cmp_sdiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (div:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (div:SI (match_dup 1) (match_dup 2))
+ [(set (reg:CC 100)
+ (compare:CC (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
(const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_dup 1) (match_dup 2)))
(clobber (match_scratch:SI 3 "=&r"))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
"*
{
if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tsdivcc\\t%1, %2, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdivcc\\t%1, %2, %0\";
else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdivcc\\t%1, %2, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdivcc\\t%1, %2, %0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
(const_int 3) (const_int 6)))])
;; XXX
-(define_insn "udivsi3"
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:SI 2 "input_operand" "")))]
+ "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && ! TARGET_LIVE_G0"
+ "")
+
+(define_insn "udivsi3_sp32"
[(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
(udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r,r,m")
(match_operand:SI 2 "input_operand" "rI,m,r")))]
"(TARGET_V8
|| TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ && TARGET_ARCH32 && ! TARGET_LIVE_G0"
"*
{
output_asm_insn (\"wr\\t%%g0, %%g0, %%y\", operands);
switch (which_alternative)
{
default:
- if (TARGET_V9)
- return \"udiv\\t%1, %2, %0\";
return \"nop\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %2, %0\";
case 1:
return \"ld\\t%2, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %0, %0\";
@@ -5669,11 +6152,16 @@
return \"ld\\t%1, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%0, %2, %0\";
}
}"
- [(set (attr "length")
- (if_then_else (and (eq_attr "isa" "v9")
- (eq_attr "alternative" "0"))
- (const_int 2) (const_int 5)))])
+ [(set_attr "length" "5")])
+(define_insn "udivsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, 0, %%y\\n\\tudiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
+
(define_insn "udivdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(udiv:DI (match_operand:DI 1 "register_operand" "r")
@@ -5681,16 +6169,13 @@
"TARGET_ARCH64"
"udivx\\t%1, %2, %0")
-;; It is not known whether this will match.
-
-;; XXX I hope it doesn't fucking match...
(define_insn "*cmp_udiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (udiv:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
+ [(set (reg:CC 100)
+ (compare:CC (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_dup 1) (match_dup 2)))]
"(TARGET_V8
|| TARGET_DEPRECATED_V8_INSNS)
&& ! TARGET_LIVE_G0"
@@ -6544,8 +7029,50 @@
(set_attr "length" "1")])
;; Floating point arithmetic instructions.
+
+(define_expand "addtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (plus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_add\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
-(define_insn "addtf3"
+(define_insn "*addtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(plus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@@ -6571,8 +7098,50 @@
"fadds\\t%1, %2, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "subtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (minus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
-(define_insn "subtf3"
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sub\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*subtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(minus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@@ -6598,8 +7167,50 @@
"fsubs\\t%1, %2, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "multf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (mult:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_mul\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
-(define_insn "multf3"
+(define_insn "*multf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@@ -6644,8 +7255,50 @@
[(set_attr "type" "fpmul")
(set_attr "length" "1")])
+(define_expand "divtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (div:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_div\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
;; don't have timing for quad-prec. divide.
-(define_insn "divtf3"
+(define_insn "*divtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(div:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@@ -6916,8 +7569,41 @@
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
-(define_insn "sqrttf2"
+(define_expand "sqrttf2"
[(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sqrt\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*sqrttf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
(sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
"fsqrtq\\t%1, %0"
@@ -7252,6 +7938,84 @@
"TARGET_V8PLUS"
"*return sparc_v8plus_shift (operands, insn, \"srlx\");"
[(set_attr "length" "5,5,6")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
;; Unconditional and other jump instructions
;; On the Sparc, by setting the annul bit on an unconditional branch, the