
Adding upstream version 1.65.7.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-19 21:29:57 +02:00
parent 5189956325
commit 32b8eb3fd7
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4153 changed files with 2487292 additions and 0 deletions


@@ -0,0 +1,139 @@
#include "libc.h"
#if __ARM_ARCH_4__ || __ARM_ARCH_4T__ || __ARM_ARCH == 4
#define BLX "mov lr,pc\n\tbx"
#else
#define BLX "blx"
#endif
extern hidden uintptr_t __a_cas_ptr, __a_barrier_ptr;
#if ((__ARM_ARCH_6__ || __ARM_ARCH_6K__ || __ARM_ARCH_6KZ__ || __ARM_ARCH_6ZK__) && !__thumb__) \
|| __ARM_ARCH_6T2__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
#define a_ll a_ll
static inline int a_ll(volatile int *p)
#ifndef __CCGO__
{
int v;
__asm__ __volatile__ ("ldrex %0, %1" : "=r"(v) : "Q"(*p));
return v;
}
#else
;
#endif
#define a_sc a_sc
static inline int a_sc(volatile int *p, int v)
#ifndef __CCGO__
{
int r;
__asm__ __volatile__ ("strex %0,%2,%1" : "=&r"(r), "=Q"(*p) : "r"(v) : "memory");
return !r;
}
#else
;
#endif
#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
#define a_barrier a_barrier
static inline void a_barrier()
#ifndef __CCGO__
{
__asm__ __volatile__ ("dmb ish" : : : "memory");
}
#else
;
#endif
#endif
#define a_pre_llsc a_barrier
#define a_post_llsc a_barrier
#else
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
#ifndef __CCGO__
{
for (;;) {
register int r0 __asm__("r0") = t;
register int r1 __asm__("r1") = s;
register volatile int *r2 __asm__("r2") = p;
register uintptr_t r3 __asm__("r3") = __a_cas_ptr;
int old;
__asm__ __volatile__ (
BLX " r3"
: "+r"(r0), "+r"(r3) : "r"(r1), "r"(r2)
: "memory", "lr", "ip", "cc" );
if (!r0) return t;
if ((old=*p)!=t) return old;
}
}
#else
;
#endif
#endif
#ifndef a_barrier
#define a_barrier a_barrier
static inline void a_barrier()
#ifndef __CCGO__
{
register uintptr_t ip __asm__("ip") = __a_barrier_ptr;
__asm__ __volatile__( BLX " ip" : "+r"(ip) : : "memory", "cc", "lr" );
}
#else
;
#endif
#endif
#define a_crash a_crash
static inline void a_crash()
#ifndef __CCGO__
{
__asm__ __volatile__(
#ifndef __thumb__
".word 0xe7f000f0"
#else
".short 0xdeff"
#endif
: : : "memory");
}
#else
;
#endif
#if __ARM_ARCH >= 5 && (!__thumb__ || __thumb2__)
#define a_clz_32 a_clz_32
static inline int a_clz_32(uint32_t x)
#ifndef __CCGO__
{
__asm__ ("clz %0, %1" : "=r"(x) : "r"(x));
return x;
}
#else
;
#endif
#if __ARM_ARCH_6T2__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
#define a_ctz_32 a_ctz_32
static inline int a_ctz_32(uint32_t x)
#ifndef __CCGO__
{
uint32_t xr;
__asm__ ("rbit %0, %1" : "=r"(xr) : "r"(x));
return a_clz_32(xr);
}
#else
;
#endif
#endif
#endif
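
The LL/SC hooks defined above (a_ll, a_sc, a_pre_llsc, a_post_llsc) are not used directly; musl's generic atomic layer composes them into the higher-level primitives. As a rough illustration of that composition (a sketch modeled on the generic fallback in src/internal/atomic.h, not part of this file), a compare-and-swap built from these hooks looks approximately like this:

/* Sketch only: how a compare-and-swap can be composed from the LL/SC hooks
 * above. Returns the value observed at *p; *p is set to s only when that
 * observed value equals the expected value t. */
static inline int a_cas_sketch(volatile int *p, int t, int s)
{
	int old;
	a_pre_llsc();                       /* barrier before the exclusive sequence */
	do old = a_ll(p);                   /* load-exclusive the current value */
	while (old == t && !a_sc(p, s));    /* retry if the store-exclusive is interrupted */
	a_post_llsc();                      /* barrier after the attempt */
	return old;
}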


@@ -0,0 +1,16 @@
#define FLT_EVAL_METHOD 0
#define LDBL_TRUE_MIN 4.94065645841246544177e-324L
#define LDBL_MIN 2.22507385850720138309e-308L
#define LDBL_MAX 1.79769313486231570815e+308L
#define LDBL_EPSILON 2.22044604925031308085e-16L
#define LDBL_MANT_DIG 53
#define LDBL_MIN_EXP (-1021)
#define LDBL_MAX_EXP 1024
#define LDBL_DIG 15
#define LDBL_MIN_10_EXP (-307)
#define LDBL_MAX_10_EXP 308
#define DECIMAL_DIG 17
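
These constants spell out that long double on 32-bit ARM has the same IEEE-754 binary64 representation as double (53-bit mantissa, LDBL_MAX_EXP 1024). A minimal, illustrative consistency check, assuming a hosted <float.h>:

/* Illustration only: long double and double share one format here, so the
 * corresponding float.h constants must agree. */
#include <float.h>
_Static_assert(LDBL_MANT_DIG == DBL_MANT_DIG, "long double is plain double on ARM");
_Static_assert(LDBL_MAX_EXP == DBL_MAX_EXP, "no extended-precision long double on ARM");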


@@ -0,0 +1,36 @@
static inline uintptr_t __get_tp();
#ifndef __CCGO__
#if ((__ARM_ARCH_6K__ || __ARM_ARCH_6KZ__ || __ARM_ARCH_6ZK__) && !__thumb__) \
|| __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
static inline uintptr_t __get_tp()
{
uintptr_t tp;
__asm__ ( "mrc p15,0,%0,c13,c0,3" : "=r"(tp) );
return tp;
}
#else
#if __ARM_ARCH_4__ || __ARM_ARCH_4T__ || __ARM_ARCH == 4
#define BLX "mov lr,pc\n\tbx"
#else
#define BLX "blx"
#endif
static inline uintptr_t __get_tp()
{
extern hidden uintptr_t __a_gettp_ptr;
register uintptr_t tp __asm__("r0");
__asm__ ( BLX " %1" : "=r"(tp) : "r"(__a_gettp_ptr) : "cc", "lr" );
return tp;
}
#endif
#endif
#define TLS_ABOVE_TP
#define GAP_ABOVE_TP 8
#define MC_PC arm_pc
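
Both this file and the atomic header above fall back to calling through writable function pointers (__a_gettp_ptr, __a_cas_ptr, __a_barrier_ptr) when the needed instructions may be absent on the target CPU. On such hardware, Linux exposes fixed-address "kuser" helpers those pointers can target. A hypothetical initialization sketch follows; the addresses are the ones documented by the kernel, while the function name and its placement are illustrative, not musl's actual startup code:

/* Hypothetical sketch: aim the indirection pointers at the kernel's
 * fixed-address user helpers (see the kernel's kernel_user_helpers doc).
 * musl's real selection logic also has native v6/v7 routines and lives in
 * its ARM-specific startup/assembly code. */
#include <stdint.h>

extern uintptr_t __a_cas_ptr, __a_barrier_ptr, __a_gettp_ptr;

static void init_kuser_fallbacks(void)
{
	__a_cas_ptr     = 0xffff0fc0;   /* __kuser_cmpxchg */
	__a_barrier_ptr = 0xffff0fa0;   /* __kuser_memory_barrier */
	__a_gettp_ptr   = 0xffff0fe0;   /* __kuser_get_tls */
}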


@@ -0,0 +1,127 @@
#ifdef __CCGO__
static __inline long __syscall0(long n);
static __inline long __syscall1(long n, long a1);
static __inline long __syscall2(long n, long a1, long a2);
static __inline long __syscall3(long n, long a1, long a2, long a3);
static __inline long __syscall4(long n, long a1, long a2, long a3, long a4);
static __inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5);
static __inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6);
#define __SYSCALL_LL_E(x) ((int)(x)), ((int)((x)>>32))
#define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
#else // __CCGO__
#define __SYSCALL_LL_E(x) \
((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
((union { long long ll; long l[2]; }){ .ll = x }).l[1]
#define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
#ifdef __thumb__
/* Avoid use of r7 in asm constraints when producing thumb code,
* since it's reserved as frame pointer and might not be supported. */
#define __ASM____R7__
#define __asm_syscall(...) do { \
__asm__ __volatile__ ( "mov %1,r7 ; mov r7,%2 ; svc 0 ; mov r7,%1" \
: "=r"(r0), "=&r"((int){0}) : __VA_ARGS__ : "memory"); \
return r0; \
} while (0)
#else
#define __ASM____R7__ __asm__("r7")
#define __asm_syscall(...) do { \
__asm__ __volatile__ ( "svc 0" \
: "=r"(r0) : __VA_ARGS__ : "memory"); \
return r0; \
} while (0)
#endif
/* For thumb2, we can allow 8-bit immediate syscall numbers, saving a
* register in the above dance around r7. Does not work for thumb1 where
* only movs, not mov, supports immediates, and we can't use movs because
* it doesn't support high regs. */
#ifdef __thumb2__
#define R7_OPERAND "rI"(r7)
#else
#define R7_OPERAND "r"(r7)
#endif
static inline long __syscall0(long n)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0");
__asm_syscall(R7_OPERAND);
}
static inline long __syscall1(long n, long a)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
__asm_syscall(R7_OPERAND, "0"(r0));
}
static inline long __syscall2(long n, long a, long b)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
register long r1 __asm__("r1") = b;
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1));
}
static inline long __syscall3(long n, long a, long b, long c)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
register long r1 __asm__("r1") = b;
register long r2 __asm__("r2") = c;
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2));
}
static inline long __syscall4(long n, long a, long b, long c, long d)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
register long r1 __asm__("r1") = b;
register long r2 __asm__("r2") = c;
register long r3 __asm__("r3") = d;
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
}
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
register long r1 __asm__("r1") = b;
register long r2 __asm__("r2") = c;
register long r3 __asm__("r3") = d;
register long r4 __asm__("r4") = e;
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
}
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
register long r7 __ASM____R7__ = n;
register long r0 __asm__("r0") = a;
register long r1 __asm__("r1") = b;
register long r2 __asm__("r2") = c;
register long r3 __asm__("r3") = d;
register long r4 __asm__("r4") = e;
register long r5 __asm__("r5") = f;
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
}
#define SYSCALL_IPC_BROKEN_MODE
#define VDSO_USEFUL
#define VDSO_CGT32_SYM "__vdso_clock_gettime"
#define VDSO_CGT32_VER "LINUX_2.6"
#define VDSO_CGT_SYM "__vdso_clock_gettime64"
#define VDSO_CGT_VER "LINUX_2.6"
#define VDSO_CGT_WORKAROUND 1
#endif // __CCGO__
#define SYSCALL_FADVISE_6_ARG
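
For reference, a minimal usage sketch of the wrappers defined above. raw_write is a made-up name for the example, and 4 is the write(2) syscall number on 32-bit ARM EABI; in musl proper the syscall()/__syscall() macros in src/internal/syscall.h are what dispatch to the __syscallN functions:

/* Illustration only: a raw write(2) through the inline wrapper above. */
static long raw_write(int fd, const void *buf, unsigned long len)
{
	return __syscall3(4 /* __NR_write */, fd, (long)buf, (long)len);
}

/* 64-bit arguments are split into two 32-bit halves with __SYSCALL_LL_E, and
 * __SYSCALL_LL_O additionally prepends a zero padding word so the pair lands
 * in an aligned (even/odd) register pair as the EABI requires. */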