radeon/llvm: Remove lowering code for unsupported features

e.g. function calls and load/store from the stack
Tom Stellard 2012-07-27 20:06:43 +00:00
parent caeaf43dad
commit 0ce6e50601
8 changed files with 50 additions and 805 deletions

@@ -35,6 +35,41 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::UREM, MVT::i32, Expand);
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//
SDValue AMDGPUTargetLowering::LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const
{
// Lowering of arguments happens in R600LowerKernelParameters, so we can
// ignore the arguments here.
for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
InVals.push_back(SDValue());
}
return Chain;
}
SDValue AMDGPUTargetLowering::LowerReturn(
SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc DL, SelectionDAG &DAG) const
{
return DAG.getNode(AMDILISD::RET_FLAG, DL, MVT::Other, Chain);
}
//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
const
{
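For context on the Expand action visible at the top of this hunk: marking ISD::UREM as Expand asks the SelectionDAG legalizer to rewrite unsigned remainder in terms of operations the target can handle, typically as a - (a / b) * b. A minimal sketch of the nodes that expansion produces (DL, A and B are illustrative placeholders, not names from this commit):

  // urem(a, b)  ==>  a - (a / b) * b, assuming UDIV is legal or custom-lowered
  SDValue Div = DAG.getNode(ISD::UDIV, DL, MVT::i32, A, B);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i32, Div, B);
  SDValue Rem = DAG.getNode(ISD::SUB, DL, MVT::i32, A, Mul);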

@@ -43,6 +43,18 @@ protected:
public:
AMDGPUTargetLowering(TargetMachine &TM);
virtual SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc DL, SelectionDAG &DAG) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;

@@ -242,35 +242,7 @@ AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
unsigned int Opc = 0;
// MachineInstr *curMI = MI;
MachineFunction &MF = *(MBB.getParent());
MachineFrameInfo &MFI = *MF.getFrameInfo();
DebugLoc DL;
switch (RC->getID()) {
case AMDGPU::GPRF32RegClassID:
Opc = AMDGPU::PRIVATESTORE_f32;
break;
case AMDGPU::GPRI32RegClassID:
Opc = AMDGPU::PRIVATESTORE_i32;
break;
}
if (MI != MBB.end()) DL = MI->getDebugLoc();
MachineMemOperand *MMO =
new MachineMemOperand(
MachinePointerInfo::getFixedStack(FrameIndex),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIndex),
MFI.getObjectAlignment(FrameIndex));
if (MI != MBB.end()) {
DL = MI->getDebugLoc();
}
BuildMI(MBB, MI, DL, get(Opc))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FrameIndex)
.addMemOperand(MMO)
.addImm(0);
assert(!"Not Implemented");
}
void
@@ -279,34 +251,9 @@ AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
unsigned int Opc = 0;
MachineFunction &MF = *(MBB.getParent());
MachineFrameInfo &MFI = *MF.getFrameInfo();
DebugLoc DL;
switch (RC->getID()) {
case AMDGPU::GPRF32RegClassID:
Opc = AMDGPU::PRIVATELOAD_f32;
break;
case AMDGPU::GPRI32RegClassID:
Opc = AMDGPU::PRIVATELOAD_i32;
break;
}
MachineMemOperand *MMO =
new MachineMemOperand(
MachinePointerInfo::getFixedStack(FrameIndex),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIndex),
MFI.getObjectAlignment(FrameIndex));
if (MI != MBB.end()) {
DL = MI->getDebugLoc();
}
BuildMI(MBB, MI, DL, get(Opc))
.addReg(DestReg, RegState::Define)
.addFrameIndex(FrameIndex)
.addMemOperand(MMO)
.addImm(0);
assert(!"Not Implemented");
}
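Both removed bodies above built their MachineMemOperand with MachineMemOperand::MOLoad, even in the store path, and targeted pseudo opcodes this commit also drops, so the hooks now simply assert. For reference, the conventional shape of storeRegToStackSlot on a target that does support spills is roughly the following sketch (the StoreOpcode name is illustrative, not from this tree):

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO =
      new MachineMemOperand(MachinePointerInfo::getFixedStack(FrameIndex),
                            MachineMemOperand::MOStore,   // a store, not MOLoad
                            MFI.getObjectSize(FrameIndex),
                            MFI.getObjectAlignment(FrameIndex));
  BuildMI(MBB, MI, DL, get(StoreOpcode))
      .addReg(SrcReg, getKillRegState(isKill))
      .addFrameIndex(FrameIndex)
      .addMemOperand(MMO);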
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,

@@ -76,7 +76,6 @@ def FeatureDumpCode : SubtargetFeature <"DumpCode",
include "AMDILRegisterInfo.td"
include "AMDILCallingConv.td"
include "AMDILInstrInfo.td"
def AMDILInstrInfo : InstrInfo {}

@@ -1,42 +0,0 @@
//===- AMDILCallingConv.td - Calling Conventions AMDIL -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AMDIL architectures.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//
// AMDIL 32-bit C return-value convention.
def RetCC_AMDIL32 : CallingConv<[
// Since IL has no return values, all values can be emulated on the stack
// The stack can then be mapped to a number of sequential virtual registers
// in IL
// Integer and FP scalar values get put on the stack at 16-byte alignment
// but with a size of 4 bytes
CCIfType<[i32, f32], CCAssignToReg<
[
R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
]> >, CCAssignToStack<16, 16>]>;
// AMDIL 32-bit C Calling convention.
def CC_AMDIL32 : CallingConv<[
// Since IL has parameter values, all values can be emulated on the stack
// The stack can then be mapped to a number of sequential virtual registers
// in IL
// Integer and FP scalar values get put on the stack at 16-byte alignment
// but with a size of 4 bytes
// Integer and FP scalar values get put on the stack at 16-byte alignment
// but with a size of 4 bytes
CCIfType<[i32, f32], CCAssignToReg<
[R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
]> >, CCAssignToStack<16, 16>]>;
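Both conventions above try the twenty scalar registers first and let everything else fall through to CCAssignToStack<16, 16>: each overflowing value reserves a 16-byte, 16-byte-aligned slot even though an i32 or f32 only uses 4 bytes of it. A tiny illustrative helper (not from this tree) for the offsets that produces, assuming the parameter area starts at offset 0:

  // Byte offset of the Nth value assigned by CCAssignToStack<16, 16>.
  static unsigned amdilStackSlotOffset(unsigned N) {
    return N * 16; // slot size 16, alignment 16, so slots are simply consecutive
  }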

@@ -413,37 +413,6 @@ CondCCodeToCC(ISD::CondCode CC, const MVT::SimpleValueType& type)
};
}
SDValue
AMDILTargetLowering::LowerMemArgument(
SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
unsigned i) const
{
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = (CallConv==CallingConv::Fast) &&
getTargetMachine().Options.GuaranteedTailCallOpt;
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
// FIXME: For now, all byval parameter objects are marked mutable. This can
// be changed with more analysis.
// In case of tail call optimization mark all arguments mutable. Since they
// could be overwritten by lowering of arguments in case of a tail call.
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal())
return FIN;
return DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
false, false, false, 0);
}
//===----------------------------------------------------------------------===//
// TargetLowering Implementation Help Functions End
//===----------------------------------------------------------------------===//
@@ -632,17 +601,9 @@ AMDILTargetLowering::LowerMemArgument(
setOperationAction(ISD::BUILD_VECTOR, MVT::Other, Custom);
// Use the default implementation.
setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
setOperationAction(ISD::ConstantFP , MVT::f32 , Legal);
setOperationAction(ISD::Constant , MVT::i32 , Legal);
setOperationAction(ISD::TRAP , MVT::Other , Legal);
setStackPointerRegisterToSaveRestore(AMDGPU::SP);
setSchedulingPreference(Sched::RegPressure);
setPow2DivIsCheap(false);
setPrefLoopAlignment(16);
@@ -753,58 +714,6 @@ AMDILTargetLowering::computeMaskedBitsForTargetNode(
};
}
// This is the function that determines which calling convention should
// be used. Currently there is only one calling convention
CCAssignFn*
AMDILTargetLowering::CCAssignFnForNode(unsigned int Op) const
{
//uint64_t CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
return CC_AMDIL32;
}
// LowerCallResult - Lower the result values of an ISD::CALL into the
// appropriate copies out of appropriate physical registers. This assumes that
// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
// being lowered. This returns an SDNode with the same number of values as the
// ISD::CALL.
SDValue
AMDILTargetLowering::LowerCallResult(
SDValue Chain,
SDValue InFlag,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const
{
// Assign locations to each value returned by this call
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_AMDIL32);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
EVT CopyVT = RVLocs[i].getValVT();
if (RVLocs[i].isRegLoc()) {
Chain = DAG.getCopyFromReg(
Chain,
dl,
RVLocs[i].getLocReg(),
CopyVT,
InFlag
).getValue(1);
SDValue Val = Chain.getValue(0);
InFlag = Chain.getValue(2);
InVals.push_back(Val);
}
}
return Chain;
}
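The getValue() shuffling in the removed LowerCallResult relies on the result numbering of a glued CopyFromReg node: result 0 is the copied value, result 1 the output chain, and result 2 the output glue. The loop body, restated with one named temporary (same calls as above, renamed only for clarity):

  SDValue Copy = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), CopyVT, InFlag);
  SDValue Val = Copy.getValue(0);  // the returned value itself
  Chain = Copy.getValue(1);        // updated chain
  InFlag = Copy.getValue(2);       // glue feeding the next copy
  InVals.push_back(Val);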
//===----------------------------------------------------------------------===//
// Other Lowering Hooks
//===----------------------------------------------------------------------===//
@@ -836,16 +745,11 @@ AMDILTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
assert(0 && "Custom lowering code for this"
"instruction is not implemented yet!");
break;
LOWER(GlobalAddress);
LOWER(JumpTable);
LOWER(ConstantPool);
LOWER(ExternalSymbol);
LOWER(SDIV);
LOWER(SREM);
LOWER(BUILD_VECTOR);
LOWER(SELECT);
LOWER(SIGN_EXTEND_INREG);
LOWER(DYNAMIC_STACKALLOC);
LOWER(BRCOND);
}
return Op;
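LOWER is a file-local macro whose definition falls outside this hunk (only the #undef below survives the diff). Judging from the way the switch falls through to return Op, it presumably expands to something close to the sketch below; this is an assumption, not the committed definition:

  // Assumed shape of the LOWER macro used in the switch above.
  #define LOWER(A) \
    case ISD::A: \
      Op = Lower##A(Op, DAG); \
      break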
@@ -853,392 +757,6 @@ AMDILTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
#undef LOWER
SDValue
AMDILTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
SDValue DST = Op;
const GlobalAddressSDNode *GADN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *G = GADN->getGlobal();
DebugLoc DL = Op.getDebugLoc();
const GlobalVariable *GV = dyn_cast<GlobalVariable>(G);
if (!GV) {
DST = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
} else {
if (GV->hasInitializer()) {
const Constant *C = dyn_cast<Constant>(GV->getInitializer());
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
DST = DAG.getConstant(CI->getValue(), Op.getValueType());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(C)) {
DST = DAG.getConstantFP(CF->getValueAPF(),
Op.getValueType());
} else if (dyn_cast<ConstantAggregateZero>(C)) {
EVT VT = Op.getValueType();
if (VT.isInteger()) {
DST = DAG.getConstant(0, VT);
} else {
DST = DAG.getConstantFP(0, VT);
}
} else {
assert(!"lowering this type of Global Address "
"not implemented yet!");
C->dump();
DST = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
}
} else {
DST = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
}
}
return DST;
}
SDValue
AMDILTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
return Result;
}
SDValue
AMDILTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
EVT PtrVT = Op.getValueType();
SDValue Result;
if (CP->isMachineConstantPoolEntry()) {
Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
CP->getAlignment(), CP->getOffset(), CP->getTargetFlags());
} else {
Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
CP->getAlignment(), CP->getOffset(), CP->getTargetFlags());
}
return Result;
}
SDValue
AMDILTargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const
{
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
SDValue Result = DAG.getTargetExternalSymbol(Sym, MVT::i32);
return Result;
}
/// LowerFORMAL_ARGUMENTS - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: isVarArg, hasStructRet, isMemReg
SDValue
AMDILTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const
{
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
//const Function *Fn = MF.getFunction();
//MachineRegisterInfo &RegInfo = MF.getRegInfo();
SmallVector<CCValAssign, 16> ArgLocs;
CallingConv::ID CC = MF.getFunction()->getCallingConv();
//bool hasStructRet = MF.getFunction()->hasStructRetAttr();
CCState CCInfo(CC, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
// When more calling conventions are added, they need to be chosen here
CCInfo.AnalyzeFormalArguments(Ins, CC_AMDIL32);
SDValue StackPtr;
//unsigned int FirstStackArgLoc = 0;
for (unsigned int i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
const TargetRegisterClass *RC = getRegClassFor(
RegVT.getSimpleVT().SimpleTy);
unsigned int Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(
Chain,
dl,
Reg,
RegVT);
// If this is an 8 or 16-bit value, it is really passed
// promoted to 32 bits. Insert an assert[sz]ext to capture
// this, then truncate to the right size.
if (VA.getLocInfo() == CCValAssign::SExt) {
ArgValue = DAG.getNode(
ISD::AssertSext,
dl,
RegVT,
ArgValue,
DAG.getValueType(VA.getValVT()));
} else if (VA.getLocInfo() == CCValAssign::ZExt) {
ArgValue = DAG.getNode(
ISD::AssertZext,
dl,
RegVT,
ArgValue,
DAG.getValueType(VA.getValVT()));
}
if (VA.getLocInfo() != CCValAssign::Full) {
ArgValue = DAG.getNode(
ISD::TRUNCATE,
dl,
VA.getValVT(),
ArgValue);
}
// Add the value to the list of arguments
// to be passed in registers
InVals.push_back(ArgValue);
if (isVarArg) {
assert(0 && "Variable arguments are not yet supported");
// See MipsISelLowering.cpp for ideas on how to implement
}
} else if(VA.isMemLoc()) {
InVals.push_back(LowerMemArgument(Chain, CallConv, Ins,
dl, DAG, VA, MFI, i));
} else {
assert(0 && "found a Value Assign that is "
"neither a register or a memory location");
}
}
/*if (hasStructRet) {
assert(0 && "Has struct return is not yet implemented");
// See MipsISelLowering.cpp for ideas on how to implement
}*/
if (isVarArg) {
assert(0 && "Variable arguments are not yet supported");
// See X86/PPC/CellSPU ISelLowering.cpp for ideas on how to implement
}
// This needs to be changed to non-zero if the return function needs
// to pop bytes
return Chain;
}
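The AssertSext/AssertZext-then-TRUNCATE sequence above is the standard recipe for arguments narrower than the location they arrive in: the assert node records how the value was extended, and the truncate recovers the declared type. A concrete, illustrative i8 case:

  // An i8 argument arrives sign-extended in a 32-bit location.
  SDValue Arg = DAG.getCopyFromReg(Chain, dl, Reg, MVT::i32);
  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                    DAG.getValueType(MVT::i8)); // top 24 bits are known sign bits
  Arg = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Arg);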
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" with size and alignment information specified by
/// the specific parameter attribute. The copy will be passed as a byval
/// function parameter.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
assert(0 && "MemCopy does not exist yet");
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain,
Src.getDebugLoc(),
Dst, Src, SizeNode, Flags.getByValAlign(),
/*IsVol=*/false, /*AlwaysInline=*/true,
MachinePointerInfo(), MachinePointerInfo());
}
SDValue
AMDILTargetLowering::LowerMemOpCallTo(SDValue Chain,
SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const
{
unsigned int LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD,
dl,
getPointerTy(), StackPtr, PtrOff);
if (Flags.isByVal()) {
PtrOff = CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
} else {
PtrOff = DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo::getStack(LocMemOffset),
false, false, 0);
}
return PtrOff;
}
/// LowerCALL - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isVarArg, isTailCall, hasStructRet
SDValue
AMDILTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool doesNotRet,
bool& isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const
{
isTailCall = false;
MachineFunction& MF = DAG.getMachineFunction();
// FIXME: DO we need to handle fast calling conventions and tail call
// optimizations?? X86/PPC ISelLowering
/*bool hasStructRet = (TheCall->getNumArgs())
? TheCall->getArgFlags(0).device()->isSRet()
: false;*/
MachineFrameInfo *MFI = MF.getFrameInfo();
// Analyze operands of the call, assigning locations to each operand
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
// Analyze the calling operands; this needs to change
// if we have more than one calling convention
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
unsigned int NumBytes = CCInfo.getNextStackOffset();
if (isTailCall) {
assert(isTailCall && "Tail Call not handled yet!");
// See X86/PPC ISelLowering
}
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SmallVector<std::pair<unsigned int, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
SDValue StackPtr;
//unsigned int FirstStacArgLoc = 0;
//int LastArgStackLoc = 0;
// Walk the register/memloc assignments, insert copies/loads
for (unsigned int i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
//bool isByVal = Flags.isByVal(); // handle byval/bypointer registers
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = OutVals[i];
//Promote the value if needed
switch(VA.getLocInfo()) {
default: assert(0 && "Unknown loc info!");
case CCValAssign::Full:
break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND,
dl,
VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND,
dl,
VA.getLocVT(), Arg);
break;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND,
dl,
VA.getLocVT(), Arg);
break;
}
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else if (VA.isMemLoc()) {
// Create the frame index object for this incoming parameter
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
VA.getLocMemOffset(), true);
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
// emit ISD::STORE which stores the
// parameter value to a stack Location
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo::getFixedStack(FI),
false, false, 0));
} else {
assert(0 && "Not a Reg/Mem Loc, major error!");
}
}
if (!MemOpChains.empty()) {
Chain = DAG.getNode(ISD::TokenFactor,
dl,
MVT::Other,
&MemOpChains[0],
MemOpChains.size());
}
SDValue InFlag;
if (!isTailCall) {
for (unsigned int i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain,
dl,
RegsToPass[i].first,
RegsToPass[i].second,
InFlag);
InFlag = Chain.getValue(1);
}
}
// If the callee is a GlobalAddress/ExternalSymbol node (quite common,
// every direct call is) turn it into a TargetGlobalAddress/
// TargetExternalSymbol
// node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
}
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
}
else if (isTailCall) {
assert(0 && "Tail calls are not handled yet");
// see X86 ISelLowering for ideas on implementation: 1708
}
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVTGLUE);
SmallVector<SDValue, 8> Ops;
if (isTailCall) {
assert(0 && "Tail calls are not handled yet");
// see X86 ISelLowering for ideas on implementation: 1721
}
// If this is a direct call, pass the chain and the callee
if (Callee.getNode()) {
Ops.push_back(Chain);
Ops.push_back(Callee);
}
if (isTailCall) {
assert(0 && "Tail calls are not handled yet");
// see X86 ISelLowering for ideas on implementation: 1739
}
// Add argument registers to the end of the list so that they are known
// live into the call
for (unsigned int i = 0, e = RegsToPass.size(); i != e; ++i) {
Ops.push_back(DAG.getRegister(
RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
}
if (InFlag.getNode()) {
Ops.push_back(InFlag);
}
// Emit Tail Call
if (isTailCall) {
assert(0 && "Tail calls are not handled yet");
// see X86 ISelLowering for ideas on implementation: 1762
}
Chain = DAG.getNode(AMDILISD::CALL,
dl,
NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node
Chain = DAG.getCALLSEQ_END(
Chain,
DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(0, true),
InFlag);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that
// we return
return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
InVals);
}
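Stripped of argument marshalling and the unimplemented tail-call paths, the removed LowerCall follows the usual SelectionDAG call protocol: bracket the outgoing-argument area with CALLSEQ_START/CALLSEQ_END, glue the register copies to the call node, and recover results through LowerCallResult. A condensed restatement using the same names as the code above:

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1); // glue keeps the copies attached to the call
  }
  Chain = DAG.getNode(AMDILISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, InVals);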
SDValue
AMDILTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const
{
@@ -1410,27 +928,6 @@ AMDILTargetLowering::genIntType(uint32_t size, uint32_t numEle) const
}
}
SDValue
AMDILTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const
{
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
unsigned int SPReg = AMDGPU::SP;
DebugLoc DL = Op.getDebugLoc();
SDValue SP = DAG.getCopyFromReg(Chain,
DL,
SPReg, MVT::i32);
SDValue NewSP = DAG.getNode(ISD::ADD,
DL,
MVT::i32, SP, Size);
Chain = DAG.getCopyToReg(SP.getValue(1),
DL,
SPReg, NewSP);
SDValue Ops[2] = {NewSP, Chain};
Chain = DAG.getMergeValues(Ops, 2 ,DL);
return Chain;
}
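One thing the removed LowerDYNAMIC_STACKALLOC never did was align the allocation: it bumps SP by the raw Size operand. A conventional lowering rounds the size up to the stack alignment first; a hedged sketch of that extra step, where StackAlign is an assumed power-of-two constant rather than a value taken from this tree:

  // AlignedSize = Size rounded up to the next multiple of StackAlign.
  SDValue Mask = DAG.getConstant(StackAlign - 1, MVT::i32);
  SDValue Bumped = DAG.getNode(ISD::ADD, DL, MVT::i32, Size, Mask);
  SDValue AlignedSize = DAG.getNode(ISD::SUB, DL, MVT::i32, Bumped,
                                    DAG.getNode(ISD::AND, DL, MVT::i32, Bumped, Mask));
  SDValue NewSP = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, AlignedSize);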
SDValue
AMDILTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{
@@ -1446,78 +943,6 @@ AMDILTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
return Result;
}
// LowerRET - Lower an ISD::RET node.
SDValue
AMDILTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG)
const
{
//MachineFunction& MF = DAG.getMachineFunction();
// CCValAssign - represent the assignment of the return value
// to a location
SmallVector<CCValAssign, 16> RVLocs;
// CCState - Info about the registers and stack slot
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values of ISD::RET
CCInfo.AnalyzeReturn(Outs, RetCC_AMDIL32);
// If this is the first return lowered for this function, add
// the regs to the liveout set for the function
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
for (unsigned int i = 0, e = RVLocs.size(); i != e; ++i) {
if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) {
MRI.addLiveOut(RVLocs[i].getLocReg());
}
}
// FIXME: implement this when tail call is implemented
// Chain = GetPossiblePreceedingTailCall(Chain, AMDILISD::TAILCALL);
// both x86 and ppc implement this in ISelLowering
// Regular return here
SDValue Flag;
SmallVector<SDValue, 6> RetOps;
RetOps.push_back(Chain);
RetOps.push_back(DAG.getConstant(0/*getBytesToPopOnReturn()*/, MVT::i32));
for (unsigned int i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
SDValue ValToCopy = OutVals[i];
assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::Ret => ret chain, (regnum1, val1), ...
// So i * 2 + 1 index only the regnums
Chain = DAG.getCopyToReg(Chain,
dl,
VA.getLocReg(),
ValToCopy,
Flag);
// guarantee that all emitted copies are stuck together
// avoiding something bad
Flag = Chain.getValue(1);
}
/*if (MF.getFunction()->hasStructRetAttr()) {
assert(0 && "Struct returns are not yet implemented!");
// Both MIPS and X86 have this
}*/
RetOps[0] = Chain;
if (Flag.getNode())
RetOps.push_back(Flag);
Flag = DAG.getNode(AMDILISD::RET_FLAG,
dl,
MVT::Other, &RetOps[0], RetOps.size());
return Flag;
}
unsigned int
AMDILTargetLowering::getFunctionAlignment(const Function *) const
{
return 0;
}
SDValue
AMDILTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const
{

@@ -90,73 +90,6 @@ namespace llvm
// we don't want to use a f2d instruction.
bool ShouldShrinkFPConstant(EVT VT) const;
/// getFunctionAlignment - Return the Log2 alignment of this
/// function.
virtual unsigned int
getFunctionAlignment(const Function *F) const;
private:
CCAssignFn*
CCAssignFnForNode(unsigned int CC) const;
SDValue LowerCallResult(SDValue Chain,
SDValue InFlag,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool doesNotRet,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerSREM(SDValue Op, SelectionDAG &DAG) const;
SDValue
@@ -189,9 +122,6 @@ namespace llvm
EVT
genIntType(uint32_t size = 32, uint32_t numEle = 1) const;
SDValue
LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;

@@ -7,49 +7,6 @@
//
//==-----------------------------------------------------------------------===//
let Predicates = [Has32BitPtr] in {
let isCodeGenOnly=1 in {
//===----------------------------------------------------------------------===//
// Store Memory Operations
//===----------------------------------------------------------------------===//
defm GLOBALTRUNCSTORE : GTRUNCSTORE<"!global trunc store">;
defm LOCALTRUNCSTORE : LTRUNCSTORE<"!local trunc store">;
defm LOCALSTORE : STORE<"!local store" , local_store>;
defm PRIVATETRUNCSTORE : PTRUNCSTORE<"!private trunc store">;
defm PRIVATESTORE : STORE<"!private store" , private_store>;
defm REGIONTRUNCSTORE : RTRUNCSTORE<"!region trunc store">;
defm REGIONSTORE : STORE<"!region hw store" , region_store>;
//===---------------------------------------------------------------------===//
// Load Memory Operations
//===---------------------------------------------------------------------===//
defm GLOBALZEXTLOAD : LOAD<"!global zext load" , global_zext_load>;
defm GLOBALSEXTLOAD : LOAD<"!global sext load" , global_sext_load>;
defm GLOBALAEXTLOAD : LOAD<"!global aext load" , global_aext_load>;
defm PRIVATELOAD : LOAD<"!private load" , private_load>;
defm PRIVATEZEXTLOAD : LOAD<"!private zext load" , private_zext_load>;
defm PRIVATESEXTLOAD : LOAD<"!private sext load" , private_sext_load>;
defm PRIVATEAEXTLOAD : LOAD<"!private aext load" , private_aext_load>;
defm CPOOLLOAD : LOAD<"!constant pool load" , cp_load>;
defm CPOOLZEXTLOAD : LOAD<"!constant pool zext load", cp_zext_load>;
defm CPOOLSEXTLOAD : LOAD<"!constant pool sext load", cp_sext_load>;
defm CPOOLAEXTLOAD : LOAD<"!constant aext pool load", cp_aext_load>;
defm CONSTANTLOAD : LOAD<"!constant load" , constant_load>;
defm CONSTANTZEXTLOAD : LOAD<"!constant zext load" , constant_zext_load>;
defm CONSTANTSEXTLOAD : LOAD<"!constant sext load" , constant_sext_load>;
defm CONSTANTAEXTLOAD : LOAD<"!constant aext load" , constant_aext_load>;
defm LOCALLOAD : LOAD<"!local load" , local_load>;
defm LOCALZEXTLOAD : LOAD<"!local zext load" , local_zext_load>;
defm LOCALSEXTLOAD : LOAD<"!local sext load" , local_sext_load>;
defm LOCALAEXTLOAD : LOAD<"!local aext load" , local_aext_load>;
defm REGIONLOAD : LOAD<"!region load" , region_load>;
defm REGIONZEXTLOAD : LOAD<"!region zext load" , region_zext_load>;
defm REGIONSEXTLOAD : LOAD<"!region sext load" , region_sext_load>;
defm REGIONAEXTLOAD : LOAD<"!region aext load" , region_aext_load>;
}
}
//===---------------------------------------------------------------------===//
// Custom Inserter for Branches and returns, this eventually will be a
// separate pass
@@ -68,24 +25,6 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1 in {
IL_OP_RET.Text, [(IL_retflag)]>;
}
//===---------------------------------------------------------------------===//
// Handle a function call
//===---------------------------------------------------------------------===//
let isCall = 1,
Defs = [
R1, R2, R3, R4, R5, R6, R7, R8, R9, R10
]
,
Uses = [
R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
]
in {
def CALL : UnaryOpNoRet<IL_OP_CALL, (outs),
(ins calltarget:$dst, variable_ops),
!strconcat(IL_OP_CALL.Text, " $dst"), []>;
}
//===---------------------------------------------------------------------===//
// Flow and Program control Instructions
//===---------------------------------------------------------------------===//