Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion interpreter/llvm/src/include/llvm/Object/ELFObjectFile.h
Original file line number Diff line number Diff line change
Expand Up @@ -905,7 +905,7 @@ unsigned ELFObjectFile<ELFT>::getArch() const {
case ELF::EM_X86_64:
return Triple::x86_64;
case ELF::EM_AARCH64:
return Triple::aarch64;
return IsLittleEndian ? Triple::aarch64 : Triple::aarch64_be;
case ELF::EM_ARM:
return Triple::arm;
case ELF::EM_AVR:
Expand Down
1 change: 1 addition & 0 deletions interpreter/llvm/src/include/llvm/Object/RelocVisitor.h
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ class RelocVisitor {
return RelocToApply();
}
case Triple::aarch64:
case Triple::aarch64_be:
switch (RelocType) {
case llvm::ELF::R_AARCH64_ABS32:
return visitELF_AARCH64_ABS32(R, Value);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -309,6 +309,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
uint32_t *TargetPtr =
reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
// Data should use target endian. Code should always use little endian.
bool isBE = Arch == Triple::aarch64_be;

DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
<< format("%llx", Section.getAddressWithOffset(Offset))
Expand All @@ -324,21 +326,30 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
case ELF::R_AARCH64_ABS64: {
uint64_t *TargetPtr =
reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset));
*TargetPtr = Value + Addend;
if (isBE)
support::ubig64_t::ref{TargetPtr} = Value + Addend;
else
support::ulittle64_t::ref{TargetPtr} = Value + Addend;
break;
}
case ELF::R_AARCH64_PREL32: {
uint64_t Result = Value + Addend - FinalAddress;
assert(static_cast<int64_t>(Result) >= INT32_MIN &&
static_cast<int64_t>(Result) <= UINT32_MAX);
*TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
if (isBE)
support::ubig32_t::ref{TargetPtr} =
static_cast<uint32_t>(Result & 0xffffffffU);
else
support::ulittle32_t::ref{TargetPtr} =
static_cast<uint32_t>(Result & 0xffffffffU);
break;
}
case ELF::R_AARCH64_CALL26: // fallthrough
case ELF::R_AARCH64_JUMP26: {
// Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
// calculation.
uint64_t BranchImm = Value + Addend - FinalAddress;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// "Check that -2^27 <= result < 2^27".
assert(isInt<28>(BranchImm));
Expand All @@ -352,91 +363,105 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
}
case ELF::R_AARCH64_MOVW_UABS_G3: {
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
TargetValue &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= Result >> (48 - 5);
TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5));
// Shift must be "lsl #48", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation");
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
TargetValue &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5));
// Shift must be "lsl #32", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation");
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
TargetValue &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
TargetValue |= ((Result & 0xffff0000U) >> (16 - 5));
// Shift must be "lsl #16", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation");
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
TargetValue &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffffU) << 5);
TargetValue |= ((Result & 0xffffU) << 5);
// Shift must be "lsl #0", in bits 22:21.
assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation");
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
// Operation: Page(S+A) - Page(P)
uint64_t Result =
((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// Check that -2^32 <= X < 2^32
assert(isInt<33>(Result) && "overflow check failed for relocation");

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0x9f00001fU;
TargetValue &= 0x9f00001fU;
// Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
// from bits 32:12 of X.
*TargetPtr |= ((Result & 0x3000U) << (29 - 12));
*TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
TargetValue |= ((Result & 0x3000U) << (29 - 12));
TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
// Operation: S + A
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffc003ffU;
TargetValue &= 0xffc003ffU;
// Immediate goes in bits 21:10 of LD/ST instruction, taken
// from bits 11:2 of X
*TargetPtr |= ((Result & 0xffc) << (10 - 2));
TargetValue |= ((Result & 0xffc) << (10 - 2));
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
// Operation: S + A
uint64_t Result = Value + Addend;
uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};

// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffc003ffU;
TargetValue &= 0xffc003ffU;
// Immediate goes in bits 21:10 of LD/ST instruction, taken
// from bits 11:3 of X
*TargetPtr |= ((Result & 0xff8) << (10 - 3));
TargetValue |= ((Result & 0xff8) << (10 - 3));
support::ulittle32_t::ref{TargetPtr} = TargetValue;
break;
}
}
Expand Down