Try to build under NetBSD/evbearmv7hf-el.

Still broken.
ryoon 2014-11-26 14:56:28 +00:00
parent 155a092bb6
commit 9e0cccc7ef
10 changed files with 852 additions and 30 deletions

distinfo

@@ -1,4 +1,4 @@
$NetBSD: distinfo,v 1.171 2014/11/18 15:33:14 tron Exp $
$NetBSD: distinfo,v 1.172 2014/11/26 14:56:28 ryoon Exp $
SHA1 (firefox-33.1.1.source.tar.bz2) = 1e9e3176e7d221c4f2ce479f37ee7c432236a0ec
RMD160 (firefox-33.1.1.source.tar.bz2) = 25cb102648b2ea1b00b0339cedaabf4c97d59c23
@@ -42,11 +42,13 @@ SHA1 (patch-gfx_graphite2_src_Bidi.cpp) = 5e80b4a32a47ae44d237fec69ea87bdd612a76
SHA1 (patch-gfx_moz.build) = a98bda4727538f4a0f09a20b84f9dd883edaf7d9
SHA1 (patch-gfx_skia_Makefile.in) = 03349db0ff9b14c2012c36d746adf2ee6cb87e62
SHA1 (patch-gfx_thebes_Makefile.in) = 16b63bc42870aa8c042416c6e04b3513981ca6f0
SHA1 (patch-gfx_ycbcr_yuv__row__arm.s) = e9bcb52b892da1f7c4dd891fedf3cd918b5c7732
SHA1 (patch-image_decoders_nsJPEGDecoder.cpp) = e5df11499b1ec14e1d4c9a1408c0611d1c4e0574
SHA1 (patch-intl_hyphenation_src_hnjalloc.h) = 7fcc7b0fcf5a33486214197e925fbc8e6e22e2ee
SHA1 (patch-intl_unicharutil_util_Makefile.in) = 77ee18c5a8d5745f6350862109e0ccf028736d9f
SHA1 (patch-ipc_chromium_Makefile.in) = edd85b79cd3879f7595b932442bb0e93505d5506
SHA1 (patch-ipc_chromium_moz.build) = de12255865e45cfdf0f7dc57ce101be75ac8ad54
SHA1 (patch-ipc_chromium_src_base_atomicops.h) = 24b63a6e51d9ab27f2788ee02f2ffa7e1c36f29a
SHA1 (patch-ipc_chromium_src_base_base__paths.h) = 7bc83577a9678542db4dff7dda19d751ff6d78c1
SHA1 (patch-ipc_chromium_src_base_debug__util__posix.cc) = 677f0b5ccdcde7a8551f2d07d2a472361a5da03f
SHA1 (patch-ipc_chromium_src_base_file__util.h) = 9d6d074b1692c6b4e722bfac8d67c66040a6fa76
@@ -71,9 +73,11 @@ SHA1 (patch-ipc_ipdl_ipdl_cxx_cgen.py) = d7f7dc85b1ff26c6d7e902f94c0ac09f0449710
SHA1 (patch-ipc_ipdl_ipdl_lower.py) = 2ef9504e462d0e3db16f9b0a6303ea5971b9c2a2
SHA1 (patch-js__src__vm__SPSProfiler.cpp) = 989ba25e4c5308d21d07baa802decce13609a475
SHA1 (patch-js_src_Makefile.in) = fffae52dcb5f267e6553a7de69c21fdfec0f3072
SHA1 (patch-js_src_assembler_jit_ExecutableAllocator.h) = 166a6d0ff566af3032c009498a4d4b43ddc10413
SHA1 (patch-js_src_ctypes_CTypes.h) = 768a084239f92a424c1c7dc9eaaf9be9456ca9f0
SHA1 (patch-js_src_frontend_ParseMaps.cpp) = c00117d79b78904bc50a1d664a8fc0e4e339bfbc
SHA1 (patch-js_src_gc_Memory.cpp) = f2ee54f0a05d870651a3235b95bbb346586f3c3c
SHA1 (patch-js_src_jit_arm_Architecture-arm.cpp) = 67e02c42753d7ffd982ade192a5beb8c5140573c
SHA1 (patch-js_src_jsmath.cpp) = 7d4993ae91e9b5e6820358165603819aefb586f9
SHA1 (patch-js_src_vm_WeakMapPtr.cpp) = c1778892afa6fb97de911221715a14f6dd9cab2f
SHA1 (patch-js_xpconnect_src_xpcprivate.h) = 3406e02342d8a890b83a241ee2cc8cd9b3fefdaa
@@ -88,8 +92,10 @@ SHA1 (patch-media_libsoundtouch_src_cpu__detect__x86.cpp) = db61737afa7773e8cbd8
SHA1 (patch-media_libstagefright_frameworks_av_include_media_stagefright_foundation_AString.h) = 1325f23c87d2bb3fc0071b4cf0054e854dfea689
SHA1 (patch-media_libstagefright_frameworks_av_media_libstagefright_foundation_AString.cpp) = fdadd8b7e28290aabae4bab78597b4968f63eaf1
SHA1 (patch-media_libtheora_Makefile.in) = 80ac9cb20760fe4d94047f7cb1c6a16bbdd6b505
SHA1 (patch-media_libtheora_lib_arm_armcpu.c) = 4b215277f9c81154377d401166023c1bf78a3718
SHA1 (patch-media_libtremor_Makefile.in) = 497d03646caa721bdd129de365aadea8466043af
SHA1 (patch-media_libvorbis_Makefile.in) = fd1ce15268eed9c97dd0774398d559cdbed63e53
SHA1 (patch-media_libyuv_include_libyuv_scale__row.h) = 503b6f2065e9d548a1f01f7158d65874f4886c78
SHA1 (patch-media_mtransport_third__party_nICEr_src_util_mbslen.c) = 087e8788c72b96d2059ca82ce4282d78aed546da
SHA1 (patch-media_mtransport_third__party_nrappkit_src_port_generic_include_sys_queue.h) = de7055f891387f96f8fd32366c7189d480354e9e
SHA1 (patch-media_webrtc_signaling_signaling.gyp) = 91d5fcc2409b1398c84dedc9697107f815de48fc
@@ -108,7 +114,7 @@ SHA1 (patch-memory_mozalloc_mozalloc__abort.cpp) = 9d9036ddd28c7b8ce37860e0120df
SHA1 (patch-mfbt_Alignment.h) = 9ebc9892fd11eba7105974979b54f4f8759b7749
SHA1 (patch-mfbt_Atomics.h) = c02d517365276cef4cec178993375b54345c0d9a
SHA1 (patch-mfbt_Attributes.h) = 0cf62084c0220a3fa46226714cea5c30b1b155e9
SHA1 (patch-mfbt_Poison.cpp) = a37c8a35637f24813f22588c4c14c28c391384e0
SHA1 (patch-mfbt_Poison.cpp) = f502581db96b3e5eca25a9aa9035f436e9167503
SHA1 (patch-mobile_android_installer_Makefile.in) = b2ea844a0145f4c15ef1b350dde6b000bd231fa5
SHA1 (patch-mobile_android_installer_package-manifest.in) = 9664858fe3b4593eb29dd0180d5a2df3cd245bb2
SHA1 (patch-modules_libjar_nsZipArchive.cpp) = 6aff0f8ed42575d8ca36a524e12e9a1f7351004a
@@ -132,13 +138,14 @@ SHA1 (patch-xpcom_build_nsXPComInit.cpp) = 727fd1ce934f129de5fcc5e6059d3cb8c30b1
SHA1 (patch-xpcom_ds_TimeStamp.h) = fe7dd3f21a2998b72f3cb7505002338f36781bed
SHA1 (patch-xpcom_reflect_xptcall_md_unix_Makefile.in) = 1e4d71a06b34556fa9339f97b75a065539baa511
SHA1 (patch-xpcom_reflect_xptcall_md_unix_moz.build) = a0445488bbcabe7a67a83df3d75f2d956e172a1f
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__arm__netbsd.cpp) = 493d1433ae72bbd0c65ab3f51de43fb864a6f58d
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__asm__mips.S) = f310105510bb5fa6c6de122ebdebbfa18423d8d3
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__asm__ppc__netbsd.s) = 8e367d4d0baf24714ee80449fc8d6849ff08ca1c
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__gcc__x86__unix.cpp) = 8cb72e1bc9a38e3f123539d7bb2aca6793f2b574
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__netbsd__m68k.cpp) = b6dc5d54eeaa908d914116429e61f4b65a6a5f8e
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__ppc__netbsd.cpp) = 71a33723a557a6153405f6baec8404b25f841608
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__sparc64__netbsd.cpp) = 0792c2c626d906e71c5183994cb32bf8cdb3e239
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcstubs__arm__netbsd.cpp) = d04f9f5642b41f4f8f37f878a47fa0ca13cc4e38
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcstubs__arm__netbsd.cpp) = 31a0f8c82cedab37a3737b93206d8b3ed7a6727f
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcstubs__asm__mips.S) = 79a4686d368132aff804f217baa685b2e06bc396
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcstubs__asm__sparc64__netbsd.s) = 522bc237bc90d85c98b61a467a431fddd91d8c3c
SHA1 (patch-xpcom_reflect_xptcall_md_unix_xptcstubs__gcc__x86__unix.cpp) = d246f0695eb43d43bfd5571297945a4b7a88f72c

patch-gfx_ycbcr_yuv__row__arm.s

@@ -0,0 +1,12 @@
$NetBSD: patch-gfx_ycbcr_yuv__row__arm.s,v 1.1 2014/11/26 14:56:28 ryoon Exp $
--- gfx/ycbcr/yuv_row_arm.s.orig 2014-11-13 22:49:57.000000000 +0000
+++ gfx/ycbcr/yuv_row_arm.s
@@ -299,6 +299,6 @@ s42xbily_neon_done:
.fnend
.size ScaleYCbCr42xToRGB565_BilinearY_Row_NEON, .-ScaleYCbCr42xToRGB565_BilinearY_Row_NEON
-#if defined(__ELF__)&&defined(__linux__)
+#if defined(__ELF__)&&(defined(__linux__) || defined(__NetBSD__))
.section .note.GNU-stack,"",%progbits
#endif

patch-ipc_chromium_src_base_atomicops.h

@@ -0,0 +1,15 @@
$NetBSD: patch-ipc_chromium_src_base_atomicops.h,v 1.1 2014/11/26 14:56:28 ryoon Exp $
* Avoid invalid cast errors under NetBSD/evbearmv7hf-el.
--- ipc/chromium/src/base/atomicops.h.orig 2014-11-13 22:49:59.000000000 +0000
+++ ipc/chromium/src/base/atomicops.h
@@ -45,7 +45,7 @@ typedef int64_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
-#ifdef OS_OPENBSD
+#if defined(OS_OPENBSD) || (defined(OS_NETBSD) && defined(ARCH_CPU_ARM_FAMILY))
#ifdef ARCH_CPU_64_BITS
typedef Atomic64 AtomicWord;
#else
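
The atomicops.h hunk puts NetBSD/ARM on the path OpenBSD already takes, so AtomicWord becomes the very same type as Atomic32 and the AtomicWord wrappers no longer juggle two distinct integer types, which is presumably the source of the "invalid cast" errors the patch comment mentions. A condensed sketch of what the selection boils down to on a 32-bit build (the intptr_t default is assumed to be the usual upstream fallback, and the 64-bit branch is omitted):

    #include <stdint.h>

    typedef int32_t Atomic32;
    #if defined(OS_OPENBSD) || (defined(OS_NETBSD) && defined(ARCH_CPU_ARM_FAMILY))
    typedef Atomic32 AtomicWord;   /* identical to Atomic32: no casting needed */
    #else
    typedef intptr_t AtomicWord;   /* may be 'long', a type distinct from int32_t */
    #endif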

patch-js_src_assembler_jit_ExecutableAllocator.h

@@ -0,0 +1,15 @@
$NetBSD: patch-js_src_assembler_jit_ExecutableAllocator.h,v 1.1 2014/11/26 14:56:28 ryoon Exp $
* Define cacheFlush for NetBSD.
--- js/src/assembler/jit/ExecutableAllocator.h.orig 2014-11-13 22:50:00.000000000 +0000
+++ js/src/assembler/jit/ExecutableAllocator.h
@@ -478,7 +478,7 @@ public:
}
#elif WTF_CPU_ARM_TRADITIONAL && WTF_OS_LINUX && WTF_COMPILER_RVCT
static __asm void cacheFlush(void* code, size_t size);
-#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID) && WTF_COMPILER_GCC
+#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID || WTF_OS_NETBSD) && WTF_COMPILER_GCC
static void cacheFlush(void* code, size_t size)
{
asm volatile (
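
The new #elif branch simply lets NetBSD share the Linux/Android inline-assembly cacheFlush implementation. For comparison only (this is not what the patch does), GCC and Clang also offer a builtin that performs the same instruction-cache synchronisation without hand-written asm; the helper name below is invented, and whether the builtin behaved correctly on NetBSD/earm toolchains of this era is not something the diff establishes:

    #include <stddef.h>

    /* Hypothetical portable variant: __builtin___clear_cache flushes the
     * instruction cache for the byte range [begin, begin + size). */
    static void cacheFlushPortable(void* code, size_t size)
    {
        char* begin = static_cast<char*>(code);
        __builtin___clear_cache(begin, begin + size);
    }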

patch-js_src_jit_arm_Architecture-arm.cpp

@@ -0,0 +1,15 @@
$NetBSD: patch-js_src_jit_arm_Architecture-arm.cpp,v 1.1 2014/11/26 14:56:28 ryoon Exp $
* NetBSD does not have asm/hwcap.h (a Linux-specific kernel header).
--- js/src/jit/arm/Architecture-arm.cpp.orig 2014-11-13 22:50:01.000000000 +0000
+++ js/src/jit/arm/Architecture-arm.cpp
@@ -18,7 +18,7 @@
#define HWCAP_USE_HARDFP_ABI (1 << 27)
-#if !(defined(ANDROID) || defined(MOZ_B2G)) && !defined(JS_ARM_SIMULATOR)
+#if !(defined(ANDROID) || defined(MOZ_B2G) || defined(__NetBSD__)) && !defined(JS_ARM_SIMULATOR)
#define HWCAP_ARMv7 (1 << 28)
#include <asm/hwcap.h>
#else

patch-media_libtheora_lib_arm_armcpu.c

@@ -0,0 +1,25 @@
$NetBSD: patch-media_libtheora_lib_arm_armcpu.c,v 1.1 2014/11/26 14:56:28 ryoon Exp $
* NetBSD/evbearmv7hf-el's /proc/cpuinfo returns empty output, so use hardcoded flags.
--- media/libtheora/lib/arm/armcpu.c.orig 2014-11-13 22:50:10.000000000 +0000
+++ media/libtheora/lib/arm/armcpu.c
@@ -107,6 +107,18 @@ ogg_uint32_t oc_cpu_flags_get(void){
return flags;
}
+#elif defined(__NetBSD__)
+ogg_uint32_t oc_cpu_flags_get(void){
+ ogg_uint32_t flags;
+ /* XXX ryoon: I do not know of a mechanism for detecting ARM CPU extensions
+    under NetBSD/earm.
+    The evbearmv6hf-el machine (Raspberry Pi) does not have NEON.
+    The evbearmv7hf-el machine (CubieBoard2) does not have EDSP.
+    I do not know about MEDIA either,
+    so all optional features are disabled here. */
+ flags=0;
+ return flags;
+}
#else
/*The feature registers which can tell us what the processor supports are
accessible in priveleged modes only, so we can't have a general user-space
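
The added NetBSD branch reports no optional CPU features at all, which is safe but leaves NEON unused even on hardware that has it. Purely as a sketch of a possible refinement, written in the context of armcpu.c (which supplies ogg_uint32_t and the OC_CPU_ARM_NEON flag): sysctlbyname(3) does exist on NetBSD, but the "machdep.neon_present" node name is an assumption, not a verified interface:

    #include <sys/param.h>
    #include <sys/sysctl.h>

    /* Hypothetical refinement: ask the kernel instead of hard-coding flags=0.
     * The sysctl node name is assumed; check sysctl(7) on the target first. */
    static ogg_uint32_t oc_cpu_flags_get_sysctl(void){
      int          present = 0;
      size_t       len = sizeof(present);
      ogg_uint32_t flags = 0;
      if (sysctlbyname("machdep.neon_present", &present, &len, NULL, 0) == 0
          && present)
        flags |= OC_CPU_ARM_NEON;
      return flags;
    }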

patch-media_libyuv_include_libyuv_scale__row.h

@@ -0,0 +1,17 @@
$NetBSD: patch-media_libyuv_include_libyuv_scale__row.h,v 1.1 2014/11/26 14:56:28 ryoon Exp $
--- media/libyuv/include/libyuv/scale_row.h.orig 2014-11-13 22:50:11.000000000 +0000
+++ media/libyuv/include/libyuv/scale_row.h
@@ -219,10 +219,10 @@ void ScaleARGBFilterCols_SSSE3(uint8* ds
void ScaleARGBColsUp2_SSE2(uint8* dst_argb, const uint8* src_argb,
int dst_width, int x, int dx);
// Row functions.
-void ScaleARGBRowDownEven_NEON(const uint8* src_argb, int src_stride,
+void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride,
int src_stepx,
uint8* dst_argb, int dst_width);
-void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, int src_stride,
+void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride,
int src_stepx,
uint8* dst_argb, int dst_width);
void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
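
The hunk changes only the declared stride type from int to ptrdiff_t so the header matches the NEON definitions elsewhere in libyuv. The reason such a mismatch breaks the build is that C++ declarations differing in a parameter type mangle to different symbols; on any target where ptrdiff_t is not plain int, the old declaration is effectively a second, never-defined function. A minimal illustration with invented names:

    #include <stddef.h>

    // What the header used to declare:
    void ScaleRowDown_NEON(const unsigned char* src, int src_stride);

    // What the NEON source actually defines:
    void ScaleRowDown_NEON(const unsigned char* src, ptrdiff_t src_stride) { }

    // Where ptrdiff_t != int these are two distinct overloads: callers built
    // against the header reference the first, which nothing defines, and the
    // link fails with an undefined reference.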

patch-mfbt_Poison.cpp

@@ -1,18 +1,17 @@
$NetBSD: patch-mfbt_Poison.cpp,v 1.5 2014/10/15 13:07:07 ryoon Exp $
$NetBSD: patch-mfbt_Poison.cpp,v 1.6 2014/11/26 14:56:28 ryoon Exp $
* Use posix_madvise(3) instead of madvise(3) for Solaris/SunOS.
Solaris/SunOS's madvise(3) is not enabled in this context,
and its first argument type is different from NetBSD/Linux's one.
--- mfbt/Poison.cpp.orig 2014-10-11 09:06:41.000000000 +0000
--- mfbt/Poison.cpp.orig 2014-11-13 22:50:12.000000000 +0000
+++ mfbt/Poison.cpp
@@ -129,7 +129,11 @@ ReleaseRegion(void* aRegion, uintptr_t a
static bool
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
- if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
+#if !defined(__sun__)
+ if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
+#else
+ if (posix_madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
+#endif

patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__arm__netbsd.cpp

@@ -0,0 +1,527 @@
$NetBSD: patch-xpcom_reflect_xptcall_md_unix_xptcinvoke__arm__netbsd.cpp,v 1.1 2014/11/26 14:56:28 ryoon Exp $
--- xpcom/reflect/xptcall/md/unix/xptcinvoke_arm_netbsd.cpp.orig 2014-11-13 22:50:21.000000000 +0000
+++ xpcom/reflect/xptcall/md/unix/xptcinvoke_arm_netbsd.cpp
@@ -7,82 +7,92 @@
#include "xptcprivate.h"
-// Remember that these 'words' are 32bit DWORDS
+#include "mozilla/Compiler.h"
-static uint32_t
-invoke_count_words(uint32_t paramCount, nsXPTCVariant* s)
+#ifndef __ARM_PCS_VFP
+
+/* This function copies a 64-bits word from dw to the given pointer in
+ * a buffer delimited by start and end, possibly wrapping around the
+ * buffer boundaries, and/or properly aligning the data at 64-bits word
+ * boundaries (for EABI).
+ * start and end are both assumed to be 64-bits aligned.
+ * Returns a pointer to the second 32-bits word copied (to accommodate
+ * the invoke_copy_to_stack loop).
+ */
+static uint32_t *
+copy_double_word(uint32_t *start, uint32_t *current, uint32_t *end, uint64_t *dw)
{
- uint32_t result = 0;
- for(uint32_t i = 0; i < paramCount; i++, s++)
- {
- if(s->IsPtrData())
- {
- result++;
- continue;
- }
- switch(s->type)
- {
- case nsXPTType::T_I8 :
- case nsXPTType::T_I16 :
- case nsXPTType::T_I32 :
- result++;
- break;
- case nsXPTType::T_I64 :
- result+=2;
- break;
- case nsXPTType::T_U8 :
- case nsXPTType::T_U16 :
- case nsXPTType::T_U32 :
- result++;
- break;
- case nsXPTType::T_U64 :
- result+=2;
- break;
- case nsXPTType::T_FLOAT :
- result++;
- break;
- case nsXPTType::T_DOUBLE :
- result+=2;
- break;
- case nsXPTType::T_BOOL :
- case nsXPTType::T_CHAR :
- case nsXPTType::T_WCHAR :
- result++;
- break;
- default:
- // all the others are plain pointer types
- result++;
- break;
- }
+#ifdef __ARM_EABI__
+ /* Aligning the pointer for EABI */
+ current = (uint32_t *)(((uint32_t)current + 7) & ~7);
+ /* Wrap when reaching the end of the buffer */
+ if (current == end) current = start;
+#else
+ /* On non-EABI, 64-bits values are not aligned and when we reach the end
+ * of the buffer, we need to write half of the data at the end, and the
+ * other half at the beginning. */
+ if (current == end - 1) {
+ *current = ((uint32_t*)dw)[0];
+ *start = ((uint32_t*)dw)[1];
+ return start;
}
- return result;
+#endif
+
+ *((uint64_t*) current) = *dw;
+ return current + 1;
}
-static void
-invoke_copy_to_stack(uint32_t* d, uint32_t paramCount, nsXPTCVariant* s)
+/* See stack_space comment in NS_InvokeByIndex to see why this needs not to
+ * be static on DEBUG builds. */
+#ifndef DEBUG
+static
+#endif
+void
+invoke_copy_to_stack(uint32_t* stk, uint32_t *end,
+ uint32_t paramCount, nsXPTCVariant* s)
{
+ /* The stack buffer is 64-bits aligned. The end argument points to its end.
+ * The caller is assumed to create a stack buffer of at least four 32-bits
+ * words.
+ * We use the last three 32-bit words to store the values for r1, r2 and r3
+ * for the method call, i.e. the first words for arguments passing.
+ */
+ uint32_t *d = end - 3;
for(uint32_t i = 0; i < paramCount; i++, d++, s++)
{
+ /* Wrap when reaching the end of the stack buffer */
+ if (d == end) d = stk;
+ NS_ASSERTION(d >= stk && d < end,
+ "invoke_copy_to_stack is copying outside its given buffer");
if(s->IsPtrData())
{
*((void**)d) = s->ptr;
continue;
}
+ // According to the ARM EABI, integral types that are smaller than a word
+ // are to be sign/zero-extended to a full word and treated as 4-byte values.
+
switch(s->type)
{
- case nsXPTType::T_I8 : *((int8_t*) d) = s->val.i8; break;
- case nsXPTType::T_I16 : *((int16_t*) d) = s->val.i16; break;
+ case nsXPTType::T_I8 : *((int32_t*) d) = s->val.i8; break;
+ case nsXPTType::T_I16 : *((int32_t*) d) = s->val.i16; break;
case nsXPTType::T_I32 : *((int32_t*) d) = s->val.i32; break;
- case nsXPTType::T_I64 : *((int64_t*) d) = s->val.i64; d++; break;
- case nsXPTType::T_U8 : *((uint8_t*) d) = s->val.u8; break;
- case nsXPTType::T_U16 : *((uint16_t*)d) = s->val.u16; break;
+ case nsXPTType::T_I64 :
+ d = copy_double_word(stk, d, end, (uint64_t *)&s->val.i64);
+ break;
+ case nsXPTType::T_U8 : *((uint32_t*)d) = s->val.u8; break;
+ case nsXPTType::T_U16 : *((uint32_t*)d) = s->val.u16; break;
case nsXPTType::T_U32 : *((uint32_t*)d) = s->val.u32; break;
- case nsXPTType::T_U64 : *((uint64_t*)d) = s->val.u64; d++; break;
+ case nsXPTType::T_U64 :
+ d = copy_double_word(stk, d, end, (uint64_t *)&s->val.u64);
+ break;
case nsXPTType::T_FLOAT : *((float*) d) = s->val.f; break;
- case nsXPTType::T_DOUBLE : *((double*) d) = s->val.d; d++; break;
- case nsXPTType::T_BOOL : *((bool*) d) = s->val.b; break;
- case nsXPTType::T_CHAR : *((char*) d) = s->val.c; break;
- case nsXPTType::T_WCHAR : *((wchar_t*) d) = s->val.wc; break;
+ case nsXPTType::T_DOUBLE :
+ d = copy_double_word(stk, d, end, (uint64_t *)&s->val.d);
+ break;
+ case nsXPTType::T_BOOL : *((int32_t*) d) = s->val.b; break;
+ case nsXPTType::T_CHAR : *((int32_t*) d) = s->val.c; break;
+ case nsXPTType::T_WCHAR : *((int32_t*) d) = s->val.wc; break;
default:
// all the others are plain pointer types
*((void**)d) = s->val.p;
@@ -91,47 +101,27 @@ invoke_copy_to_stack(uint32_t* d, uint32
}
}
-extern "C"
-struct my_params_struct {
- nsISupports* that;
- uint32_t Index;
- uint32_t Count;
- nsXPTCVariant* params;
- uint32_t fn_count;
- uint32_t fn_copy;
-};
+typedef nsresult (*vtable_func)(nsISupports *, uint32_t, uint32_t, uint32_t);
-XPTC_PUBLIC_API(nsresult)
-XPTC_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
+EXPORT_XPCOM_API(nsresult)
+NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
uint32_t paramCount, nsXPTCVariant* params)
{
- uint32_t result;
- struct my_params_struct my_params;
- my_params.that = that;
- my_params.Index = methodIndex;
- my_params.Count = paramCount;
- my_params.params = params;
- my_params.fn_copy = (uint32_t) &invoke_copy_to_stack;
- my_params.fn_count = (uint32_t) &invoke_count_words;
/* This is to call a given method of class that.
* The parameters are in params, the number is in paramCount.
* The routine will issue calls to count the number of words
* required for argument passing and to copy the arguments to
* the stack.
- * Since APCS passes the first 3 params in r1-r3, we need to
- * load the first three words from the stack and correct the stack
- * pointer (sp) in the appropriate way. This means:
- *
- * 1.) more than 3 arguments: load r1-r3, correct sp and remember No.
- * of bytes left on the stack in r4
- *
- * 2.) <= 2 args: load r1-r3 (we won't be causing a stack overflow I hope),
- * restore sp as if nothing had happened and set the marker r4 to zero.
- *
- * Afterwards sp will be restored using the value in r4 (which is not a temporary register
- * and will be preserved by the function/method called according to APCS [ARM Procedure
- * Calling Standard]).
+ * APCS passes the first 3 params in r1-r3 (with exceptions for 64-bits
+ * arguments), and the rest go onto the stack.
+ * We allocate a buffer on the stack for a "worst case" estimate of how much
+ * stack might be needed for EABI, i.e. twice the number of parameters.
+ * The end of this buffer will be used to store r1 to r3, so that the start
+ * of the stack is the remaining parameters.
+ * The magic here is to call the method with "that" and three 32-bits
+ * arguments corresponding to r1-r3, so that the compiler generates the
+ * proper function call. The stack will also contain the remaining arguments.
*
* !!! IMPORTANT !!!
* This routine makes assumptions about the vtable layout of the c++ compiler. It's implemented
@@ -139,43 +129,272 @@ XPTC_InvokeByIndex(nsISupports* that, ui
*
*/
- __asm__ __volatile__(
- "ldr r1, [%1, #12] \n\t" /* prepare to call invoke_count_words */
- "ldr ip, [%1, #16] \n\t" /* r0=paramCount, r1=params */
- "ldr r0, [%1, #8] \n\t"
- "mov lr, pc \n\t" /* call it... */
- "mov pc, ip \n\t"
- "mov r4, r0, lsl #2 \n\t" /* This is the amount of bytes needed. */
- "sub sp, sp, r4 \n\t" /* use stack space for the args... */
- "mov r0, sp \n\t" /* prepare a pointer an the stack */
- "ldr r1, [%1, #8] \n\t" /* =paramCount */
- "ldr r2, [%1, #12] \n\t" /* =params */
- "ldr ip, [%1, #20] \n\t" /* =invoke_copy_to_stack */
- "mov lr, pc \n\t" /* copy args to the stack like the */
- "mov pc, ip \n\t" /* compiler would. */
- "ldr r0, [%1] \n\t" /* =that */
- "ldr r1, [r0, #0] \n\t" /* get that->vtable offset */
- "ldr r2, [%1, #4] \n\t"
- "add r2, r1, r2, lsl #3\n\t" /* a vtable_entry(x)=8 + (8 bytes * x) */
- "add r2, r2, #8 \n\t" /* with this compilers */
- "ldr r3, [r2] \n\t" /* get virtual offset from vtable */
- "mov r3, r3, lsl #16 \n\t"
- "add r0, r0, r3, asr #16\n\t"
- "ldr ip, [r2, #4] \n\t" /* get method address from vtable */
- "cmp r4, #12 \n\t" /* more than 3 arguments??? */
- "ldmgtia sp!, {r1, r2, r3}\n\t" /* yes: load arguments for r1-r3 */
- "subgt r4, r4, #12 \n\t" /* and correct the stack pointer */
- "ldmleia sp, {r1, r2, r3}\n\t" /* no: load r1-r3 from stack */
- "addle sp, sp, r4 \n\t" /* and restore stack pointer */
- "movle r4, #0 \n\t" /* a mark for restoring sp */
- "mov lr, pc \n\t" /* call mathod */
- "mov pc, ip \n\t"
- "add sp, sp, r4 \n\t" /* restore stack pointer */
- "mov %0, r0 \n\t" /* the result... */
- : "=r" (result)
- : "r" (&my_params)
- : "r0", "r1", "r2", "r3", "r4", "ip", "lr"
- );
-
- return result;
+ vtable_func *vtable, func;
+ int base_size = (paramCount > 1) ? paramCount : 2;
+
+/* !!! IMPORTANT !!!
+ * On DEBUG builds, the NS_ASSERTION used in invoke_copy_to_stack needs to use
+ * the stack to pass the 5th argument to NS_DebugBreak. When invoke_copy_to_stack
+ * is inlined, this can result, depending on the compiler and flags, in the
+ * stack pointer not pointing at stack_space when the method is called at the
+ * end of this function. More generally, any function call requiring stack
+ * allocation of arguments is unsafe to be inlined in this function.
+ */
+ uint32_t *stack_space = (uint32_t *) __builtin_alloca(base_size * 8);
+
+ invoke_copy_to_stack(stack_space, &stack_space[base_size * 2],
+ paramCount, params);
+
+ vtable = *reinterpret_cast<vtable_func **>(that);
+ func = vtable[methodIndex];
+
+ return func(that, stack_space[base_size * 2 - 3],
+ stack_space[base_size * 2 - 2],
+ stack_space[base_size * 2 - 1]);
}
+
+#else /* __ARM_PCS_VFP */
+
+/* "Procedure Call Standard for the ARM Architecture" document, sections
+ * "5.5 Parameter Passing" and "6.1.2 Procedure Calling" contain all the
+ * needed information.
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0042d/IHI0042D_aapcs.pdf
+ */
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#error "Thumb1 is not supported"
+#endif
+
+#ifndef __ARMEL__
+#error "Only little endian compatibility was tested"
+#endif
+
+/*
+ * Allocation of integer function arguments initially to registers r1-r3
+ * and then to stack. Handling of 'this' argument which goes to r0 registers
+ * is handled separately and does not belong to these two inline functions.
+ *
+ * The doubleword arguments are allocated to even:odd
+ * register pairs or get aligned at 8-byte boundary on stack. The "holes"
+ * which may appear as a result of this realignment remain unused.
+ *
+ * 'ireg_args' - pointer to the current position in the buffer,
+ * corresponding to the register arguments
+ * 'stack_args' - pointer to the current position in the buffer,
+ * corresponding to the arguments on stack
+ * 'end' - pointer to the end of the registers argument
+ * buffer (it is guaranteed to be 8-bytes aligned)
+ */
+
+static inline void copy_word(uint32_t* &ireg_args,
+ uint32_t* &stack_args,
+ uint32_t* end,
+ uint32_t data)
+{
+ if (ireg_args < end) {
+ *ireg_args = data;
+ ireg_args++;
+ } else {
+ *stack_args = data;
+ stack_args++;
+ }
+}
+
+static inline void copy_dword(uint32_t* &ireg_args,
+ uint32_t* &stack_args,
+ uint32_t* end,
+ uint64_t data)
+{
+ if (ireg_args + 1 < end) {
+ if ((uint32_t)ireg_args & 4) {
+ ireg_args++;
+ }
+ *(uint64_t *)ireg_args = data;
+ ireg_args += 2;
+ } else {
+ if ((uint32_t)stack_args & 4) {
+ stack_args++;
+ }
+ *(uint64_t *)stack_args = data;
+ stack_args += 2;
+ }
+}
+
+/*
+ * Allocation of floating point arguments to VFP registers (s0-s15, d0-d7).
+ *
+ * Unlike integer registers allocation, "back-filling" needs to be
+ * supported. For example, the third floating point argument in the
+ * following function is going to be allocated to s1 register, back-filling
+ * the "hole":
+ * void f(float s0, double d1, float s1)
+ *
+ * Refer to the "Procedure Call Standard for the ARM Architecture" document
+ * for more details.
+ *
+ * 'vfp_s_args' - pointer to the current position in the buffer with
+ * the next unallocated single precision register
+ * 'vfp_d_args' - pointer to the current position in the buffer with
+ * the next unallocated double precision register,
+ * it has the same value as 'vfp_s_args' when back-filling
+ * is not used
+ * 'end' - pointer to the end of the vfp registers argument
+ * buffer (it is guaranteed to be 8-bytes aligned)
+ *
+ * Mozilla bugtracker has a test program attached which be used for
+ * experimenting with VFP registers allocation code and testing its
+ * correctness:
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=601914#c19
+ */
+
+static inline bool copy_vfp_single(float* &vfp_s_args, double* &vfp_d_args,
+ float* end, float data)
+{
+ if (vfp_s_args >= end)
+ return false;
+
+ *vfp_s_args = data;
+ vfp_s_args++;
+ if (vfp_s_args < (float *)vfp_d_args) {
+ // It was the case of back-filling, now the next free single precision
+ // register should overlap with the next free double precision register
+ vfp_s_args = (float *)vfp_d_args;
+ } else if (vfp_s_args > (float *)vfp_d_args) {
+ // also update the pointer to the next free double precision register
+ vfp_d_args++;
+ }
+ return true;
+}
+
+static inline bool copy_vfp_double(float* &vfp_s_args, double* &vfp_d_args,
+ float* end, double data)
+{
+ if (vfp_d_args >= (double *)end) {
+ // The back-filling continues only so long as no VFP CPRC has been
+ // allocated to a slot on the stack. Basically no VFP registers can
+ // be allocated after this point.
+ vfp_s_args = end;
+ return false;
+ }
+
+ if (vfp_s_args == (float *)vfp_d_args) {
+ // also update the pointer to the next free single precision register
+ vfp_s_args += 2;
+ }
+ *vfp_d_args = data;
+ vfp_d_args++;
+ return true;
+}
+
+static void
+invoke_copy_to_stack(uint32_t* stk, uint32_t *end,
+ uint32_t paramCount, nsXPTCVariant* s)
+{
+ uint32_t *ireg_args = end - 3;
+ float *vfp_s_args = (float *)end;
+ double *vfp_d_args = (double *)end;
+ float *vfp_end = vfp_s_args + 16;
+
+ for (uint32_t i = 0; i < paramCount; i++, s++) {
+ if (s->IsPtrData()) {
+ copy_word(ireg_args, stk, end, (uint32_t)s->ptr);
+ continue;
+ }
+ // According to the ARM EABI, integral types that are smaller than a word
+ // are to be sign/zero-extended to a full word and treated as 4-byte values
+ switch (s->type)
+ {
+ case nsXPTType::T_FLOAT:
+ if (!copy_vfp_single(vfp_s_args, vfp_d_args, vfp_end, s->val.f)) {
+ copy_word(end, stk, end, reinterpret_cast<uint32_t&>(s->val.f));
+ }
+ break;
+ case nsXPTType::T_DOUBLE:
+ if (!copy_vfp_double(vfp_s_args, vfp_d_args, vfp_end, s->val.d)) {
+ copy_dword(end, stk, end, reinterpret_cast<uint64_t&>(s->val.d));
+ }
+ break;
+ case nsXPTType::T_I8: copy_word(ireg_args, stk, end, s->val.i8); break;
+ case nsXPTType::T_I16: copy_word(ireg_args, stk, end, s->val.i16); break;
+ case nsXPTType::T_I32: copy_word(ireg_args, stk, end, s->val.i32); break;
+ case nsXPTType::T_I64: copy_dword(ireg_args, stk, end, s->val.i64); break;
+ case nsXPTType::T_U8: copy_word(ireg_args, stk, end, s->val.u8); break;
+ case nsXPTType::T_U16: copy_word(ireg_args, stk, end, s->val.u16); break;
+ case nsXPTType::T_U32: copy_word(ireg_args, stk, end, s->val.u32); break;
+ case nsXPTType::T_U64: copy_dword(ireg_args, stk, end, s->val.u64); break;
+ case nsXPTType::T_BOOL: copy_word(ireg_args, stk, end, s->val.b); break;
+ case nsXPTType::T_CHAR: copy_word(ireg_args, stk, end, s->val.c); break;
+ case nsXPTType::T_WCHAR: copy_word(ireg_args, stk, end, s->val.wc); break;
+ default:
+ // all the others are plain pointer types
+ copy_word(ireg_args, stk, end, reinterpret_cast<uint32_t>(s->val.p));
+ break;
+ }
+ }
+}
+
+typedef uint32_t (*vtable_func)(nsISupports *, uint32_t, uint32_t, uint32_t);
+
+EXPORT_XPCOM_API(nsresult)
+NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
+ uint32_t paramCount, nsXPTCVariant* params)
+{
+ vtable_func *vtable = *reinterpret_cast<vtable_func **>(that);
+ vtable_func func = vtable[methodIndex];
+ // 'register uint32_t result asm("r0")' could be used here, but it does not
+ // seem to be reliable in all cases: http://gcc.gnu.org/PR46164
+ nsresult result;
+ asm (
+ "mov r3, sp\n"
+ "mov %[stack_space_size], %[param_count_plus_2], lsl #3\n"
+ "tst r3, #4\n" /* check stack alignment */
+
+ "add %[stack_space_size], #(4 * 16)\n" /* space for VFP registers */
+ "mov r3, %[params]\n"
+
+ "it ne\n"
+ "addne %[stack_space_size], %[stack_space_size], #4\n"
+ "sub r0, sp, %[stack_space_size]\n" /* allocate space on stack */
+
+ "sub r2, %[param_count_plus_2], #2\n"
+ "mov sp, r0\n"
+
+ "add r1, r0, %[param_count_plus_2], lsl #3\n"
+ "blx %[invoke_copy_to_stack]\n"
+
+ "add ip, sp, %[param_count_plus_2], lsl #3\n"
+ "mov r0, %[that]\n"
+ "ldmdb ip, {r1, r2, r3}\n"
+ "vldm ip, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
+ "blx %[func]\n"
+
+ "add sp, sp, %[stack_space_size]\n" /* cleanup stack */
+ "mov %[stack_space_size], r0\n" /* it's actually 'result' variable */
+ : [stack_space_size] "=&r" (result)
+ : [func] "r" (func),
+ [that] "r" (that),
+ [params] "r" (params),
+ [param_count_plus_2] "r" (paramCount + 2),
+ [invoke_copy_to_stack] "r" (invoke_copy_to_stack)
+ : "cc", "memory",
+ // Mark all the scratch registers as clobbered because they may be
+ // modified by the functions, called from this inline assembly block
+ "r0", "r1", "r2", "r3", "ip", "lr",
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ // Also unconditionally mark d16-d31 registers as clobbered even though
+ // they actually don't exist in vfpv2 and vfpv3-d16 variants. There is
+ // no way to identify VFP variant using preprocessor at the moment
+ // (see http://gcc.gnu.org/PR46128 for more details), but fortunately
+ // current versions of gcc do not seem to complain about these registers
+ // even when this code is compiled with '-mfpu=vfpv3-d16' option.
+ // If gcc becomes more strict in the future and/or provides a way to
+ // identify VFP variant, the following d16-d31 registers list needs
+ // to be wrapped into some #ifdef
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
+ );
+ return result;
+}
+
+#endif
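
Both halves of the new xptcinvoke file implement the same EABI rule for 64-bit arguments: they occupy an even/odd register pair or an 8-byte-aligned stack slot, and the padding word created by that alignment is left unused. A standalone sketch of just that step (the helper name and the flat buffer are ours, not the patch's):

    #include <stdint.h>
    #include <string.h>

    /* Align 'slot' to the next 8-byte boundary inside a 32-bit argument buffer,
     * store a 64-bit value there and return the next free slot; this mirrors
     * copy_dword()/copy_double_word() in the patch above. */
    static uint32_t* put_dword(uint32_t* slot, uint64_t value)
    {
        if (reinterpret_cast<uintptr_t>(slot) & 4)
            ++slot;                              /* skip the padding "hole" */
        memcpy(slot, &value, sizeof value);      /* avoids unaligned/aliasing issues */
        return slot + 2;                         /* a doubleword uses two slots */
    }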

patch-xpcom_reflect_xptcall_md_unix_xptcstubs__arm__netbsd.cpp

@@ -1,31 +1,221 @@
$NetBSD: patch-xpcom_reflect_xptcall_md_unix_xptcstubs__arm__netbsd.cpp,v 1.1 2014/10/15 13:43:32 ryoon Exp $
$NetBSD: patch-xpcom_reflect_xptcall_md_unix_xptcstubs__arm__netbsd.cpp,v 1.2 2014/11/26 14:56:28 ryoon Exp $
--- xpcom/reflect/xptcall/md/unix/xptcstubs_arm_netbsd.cpp.orig 2014-10-11 09:06:50.000000000 +0000
--- xpcom/reflect/xptcall/md/unix/xptcstubs_arm_netbsd.cpp.orig 2014-11-13 22:50:21.000000000 +0000
+++ xpcom/reflect/xptcall/md/unix/xptcstubs_arm_netbsd.cpp
@@ -86,18 +86,23 @@ PrepareAndDispatch(nsXPTCStubBase* self,
* so they are contiguous with values passed on the stack, and then calls
* PrepareAndDispatch() to do the dirty work.
*/
+#ifndef __ELF__
+#define SYMBOLPREFIX "_"
@@ -6,15 +6,32 @@
/* Implement shared vtbl methods. */
#include "xptcprivate.h"
+#include "xptiprivate.h"
-nsresult ATTRIBUTE_USED
+/* Specify explicitly a symbol for this function, don't try to guess the c++ mangled symbol. */
+static nsresult PrepareAndDispatch(nsXPTCStubBase* self, uint32_t methodIndex, uint32_t* args) asm("_PrepareAndDispatch")
+ATTRIBUTE_USED;
+
+#ifdef __ARM_EABI__
+#define DOUBLEWORD_ALIGN(p) ((uint32_t *)((((uint32_t)(p)) + 7) & 0xfffffff8))
+#else
+#define SYMBOLPREFIX
+#define DOUBLEWORD_ALIGN(p) (p)
+#endif
+
+// Apple's iOS toolchain is lame and does not support .cfi directives.
+#ifdef __APPLE__
+#define CFI(str)
+#else
+#define CFI(str) str
+#endif
+
+static nsresult
PrepareAndDispatch(nsXPTCStubBase* self, uint32_t methodIndex, uint32_t* args)
{
#define PARAM_BUFFER_COUNT 16
nsXPTCMiniVariant paramBuffer[PARAM_BUFFER_COUNT];
nsXPTCMiniVariant* dispatchParams = nullptr;
- nsIInterfaceInfo* iface_info = nullptr;
const nsXPTMethodInfo* info;
uint8_t paramCount;
uint8_t i;
@@ -22,12 +39,7 @@ PrepareAndDispatch(nsXPTCStubBase* self,
NS_ASSERTION(self,"no self");
- self->GetInterfaceInfo(&iface_info);
- NS_ASSERTION(iface_info,"no interface info");
-
- iface_info->GetMethodInfo(uint16_t(methodIndex), &info);
- NS_ASSERTION(info,"no interface info");
-
+ self->mEntry->GetMethodInfo(uint16_t(methodIndex), &info);
paramCount = info->GetParamCount();
// setup variant array pointer
@@ -55,13 +67,16 @@ PrepareAndDispatch(nsXPTCStubBase* self,
case nsXPTType::T_I8 : dp->val.i8 = *((int8_t*) ap); break;
case nsXPTType::T_I16 : dp->val.i16 = *((int16_t*) ap); break;
case nsXPTType::T_I32 : dp->val.i32 = *((int32_t*) ap); break;
- case nsXPTType::T_I64 : dp->val.i64 = *((int64_t*) ap); ap++; break;
+ case nsXPTType::T_I64 : ap = DOUBLEWORD_ALIGN(ap);
+ dp->val.i64 = *((int64_t*) ap); ap++; break;
case nsXPTType::T_U8 : dp->val.u8 = *((uint8_t*) ap); break;
case nsXPTType::T_U16 : dp->val.u16 = *((uint16_t*)ap); break;
case nsXPTType::T_U32 : dp->val.u32 = *((uint32_t*)ap); break;
- case nsXPTType::T_U64 : dp->val.u64 = *((uint64_t*)ap); ap++; break;
+ case nsXPTType::T_U64 : ap = DOUBLEWORD_ALIGN(ap);
+ dp->val.u64 = *((uint64_t*)ap); ap++; break;
case nsXPTType::T_FLOAT : dp->val.f = *((float*) ap); break;
- case nsXPTType::T_DOUBLE : dp->val.d = *((double*) ap); ap++; break;
+ case nsXPTType::T_DOUBLE : ap = DOUBLEWORD_ALIGN(ap);
+ dp->val.d = *((double*) ap); ap++; break;
case nsXPTType::T_BOOL : dp->val.b = *((bool*) ap); break;
case nsXPTType::T_CHAR : dp->val.c = *((char*) ap); break;
case nsXPTType::T_WCHAR : dp->val.wc = *((wchar_t*) ap); break;
@@ -71,9 +86,7 @@ PrepareAndDispatch(nsXPTCStubBase* self,
}
}
- result = self->CallMethod((uint16_t)methodIndex, info, dispatchParams);
-
- NS_RELEASE(iface_info);
+ result = self->mOuter->CallMethod((uint16_t)methodIndex, info, dispatchParams);
if(dispatchParams != paramBuffer)
delete [] dispatchParams;
@@ -82,26 +95,118 @@ PrepareAndDispatch(nsXPTCStubBase* self,
}
/*
- * These stubs move just move the values passed in registers onto the stack,
- * so they are contiguous with values passed on the stack, and then calls
- * PrepareAndDispatch() to do the dirty work.
+ * This is our shared stub.
+ *
+ * r0 = Self.
+ *
+ * The Rules:
+ * We pass an (undefined) number of arguments into this function.
+ * The first 3 C++ arguments are in r1 - r3, the rest are built
+ * by the calling function on the stack.
+ *
+ * We are allowed to corrupt r0 - r3, ip, and lr.
+ *
+ * Other Info:
+ * We pass the stub number in using `ip'.
+ *
+ * Implementation:
+ * - We save r1 to r3 inclusive onto the stack, which will be
+ * immediately below the caller saved arguments.
+ * - setup r2 (PrepareAndDispatch's args pointer) to point at
+ * the base of all these arguments
+ * - Save LR (for the return address)
+ * - Set r1 (PrepareAndDispatch's methodindex argument) from ip
+ * - r0 is passed through (self)
+ * - Call PrepareAndDispatch
+ * - When the call returns, we return by loading the PC off the
+ * stack, and undoing the stack (one instruction)!
+ *
*/
+__asm__ ("\n"
+ ".text\n"
+ ".align 2\n"
+ "SharedStub:\n"
+ ".fnstart\n"
+ CFI(".cfi_startproc\n")
+ "stmfd sp!, {r1, r2, r3}\n"
+ ".save {r1, r2, r3}\n"
+ CFI(".cfi_def_cfa_offset 12\n")
+ CFI(".cfi_offset r3, -4\n")
+ CFI(".cfi_offset r2, -8\n")
+ CFI(".cfi_offset r1, -12\n")
+ "mov r2, sp\n"
+ "str lr, [sp, #-4]!\n"
+ ".save {lr}\n"
+ CFI(".cfi_def_cfa_offset 16\n")
+ CFI(".cfi_offset lr, -16\n")
+ "mov r1, ip\n"
+ "bl _PrepareAndDispatch\n"
+ "ldr pc, [sp], #16\n"
+ CFI(".cfi_endproc\n")
+ ".fnend");
+
+/*
+ * Create sets of stubs to call the SharedStub.
+ * We don't touch the stack here, nor any registers, other than IP.
+ * IP is defined to be corruptable by a called function, so we are
+ * safe to use it.
+ *
+ * This will work with or without optimisation.
+ */
+
+/*
+ * Note : As G++3 ABI contains the length of the functionname in the
+ * mangled name, it is difficult to get a generic assembler mechanism like
+ * in the G++ 2.95 case.
+ * Create names would be like :
+ * _ZN14nsXPTCStubBase5Stub9Ev
+ * _ZN14nsXPTCStubBase6Stub13Ev
+ * _ZN14nsXPTCStubBase7Stub144Ev
+ * Use the assembler directives to get the names right...
+ */
+
+#define STUB_ENTRY(n) \
+ __asm__( \
+ ".section \".text\"\n" \
+" .align 2\n" \
+" .iflt ("#n" - 10)\n" \
+" .globl _ZN14nsXPTCStubBase5Stub"#n"Ev\n" \
+" .type _ZN14nsXPTCStubBase5Stub"#n"Ev,#function\n" \
+"_ZN14nsXPTCStubBase5Stub"#n"Ev:\n" \
+" .else\n" \
+" .iflt ("#n" - 100)\n" \
+" .globl _ZN14nsXPTCStubBase6Stub"#n"Ev\n" \
+" .type _ZN14nsXPTCStubBase6Stub"#n"Ev,#function\n" \
+"_ZN14nsXPTCStubBase6Stub"#n"Ev:\n" \
+" .else\n" \
+" .iflt ("#n" - 1000)\n" \
+" .globl _ZN14nsXPTCStubBase7Stub"#n"Ev\n" \
+" .type _ZN14nsXPTCStubBase7Stub"#n"Ev,#function\n" \
+"_ZN14nsXPTCStubBase7Stub"#n"Ev:\n" \
+" .else\n" \
+" .err \"stub number "#n"> 1000 not yet supported\"\n" \
+" .endif\n" \
+" .endif\n" \
+" .endif\n" \
+" mov ip, #"#n"\n" \
+" b SharedStub\n\t");
+
+#if 0
+/*
+ * This part is left in as comment : this is how the method definition
+ * should look like.
+ */
+
+#define STUB_ENTRY(n) \
+nsresult nsXPTCStubBase::Stub##n () \
+{ \
+ __asm__ ( \
+" mov ip, #"#n"\n" \
+" b SharedStub\n\t"); \
+ return 0; /* avoid warnings */ \
+}
+#endif
#define STUB_ENTRY(n) \
__asm__( \
-#define STUB_ENTRY(n) \
-__asm__( \
- ".global _Stub"#n"__14nsXPTCStubBase\n\t" \
-"_Stub"#n"__14nsXPTCStubBase:\n\t" \
+ ".global "SYMBOLPREFIX"Stub"#n"__14nsXPTCStubBase\n\t" \
+SYMBOLPREFIX"Stub"#n"__14nsXPTCStubBase:\n\t" \
"stmfd sp!, {r1, r2, r3} \n\t" \
"mov ip, sp \n\t" \
"stmfd sp!, {fp, ip, lr, pc} \n\t" \
"sub fp, ip, #4 \n\t" \
"mov r1, #"#n" \n\t" /* = methodIndex */ \
"add r2, sp, #16 \n\t" \
- "stmfd sp!, {r1, r2, r3} \n\t" \
- "mov ip, sp \n\t" \
- "stmfd sp!, {fp, ip, lr, pc} \n\t" \
- "sub fp, ip, #4 \n\t" \
- "mov r1, #"#n" \n\t" /* = methodIndex */ \
- "add r2, sp, #16 \n\t" \
- "bl _PrepareAndDispatch__FP14nsXPTCStubBaseUiPUi \n\t" \
+ "bl "SYMBOLPREFIX"PrepareAndDispatch__FP14nsXPTCStubBaseUiPUi \n\t" \
"ldmea fp, {fp, sp, lr} \n\t" \
"add sp, sp, #12 \n\t" \
"mov pc, lr \n\t" \
- "ldmea fp, {fp, sp, lr} \n\t" \
- "add sp, sp, #12 \n\t" \
- "mov pc, lr \n\t" \
-);
#define SENTINEL_ENTRY(n) \
nsresult nsXPTCStubBase::Sentinel##n() \