[AMDGPU] Apply llvm-prefer-register-over-unsigned from clang-tidy

Jay Foad 2020-08-21 10:01:01 +01:00
parent 90e0a02129
commit 98de0d22f5
9 changed files with 28 additions and 28 deletions
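
For context: the llvm-prefer-register-over-unsigned clang-tidy check flags variables declared as unsigned whose initializer already has type llvm::Register, so every hunk below changes only a declared type. Behaviour is unchanged because Register converts implicitly to and from unsigned, which is also why comparisons such as Reg != AMDGPU::NoRegister keep compiling. A minimal before/after sketch of the pattern (the getAnyReg helper is hypothetical, standing in for calls like MachineOperand::getReg that return llvm::Register):

#include "llvm/CodeGen/Register.h"

// Hypothetical helper standing in for calls like MI.getOperand(0).getReg().
llvm::Register getAnyReg();

void before() {
  // Flagged by the check: the Register result is immediately narrowed to a
  // plain unsigned via Register's implicit conversion to unsigned.
  unsigned Reg = getAnyReg();
  (void)Reg;
}

void after() {
  // The fix keeps the stronger type; Register still converts back to
  // unsigned wherever older interfaces expect one.
  llvm::Register Reg = getAnyReg();
  (void)Reg;
}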

llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp

@@ -1873,7 +1873,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfBlock(
           ? SinglePred->findDebugLoc(SinglePred->getFirstTerminator())
           : DebugLoc();
-  unsigned Reg =
+  Register Reg =
       TII->insertEQ(IfBB, IfBB->begin(), DL, IfReg,
                     SelectBB->getNumber() /* CodeBBStart->getNumber() */);
   if (&(*(IfBB->getParent()->begin())) == IfBB) {
@@ -2335,7 +2335,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
     TII->removeBranch(*RegionExit);
     // We need to create a backedge if there is a loop
-    unsigned Reg = TII->insertNE(
+    Register Reg = TII->insertNE(
         RegionExit, RegionExit->instr_end(), DL,
         CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
         CurrentRegion->getRegionMRT()->getEntry()->getNumber());
@@ -2394,7 +2394,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
     TII->removeBranch(*RegionExit);
     // We need to create a backedge if there is a loop
-    unsigned Reg =
+    Register Reg =
         TII->insertNE(RegionExit, RegionExit->instr_end(), DL,
                       CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
                       CurrentRegion->getRegionMRT()->getEntry()->getNumber());

llvm/lib/Target/AMDGPU/R600ISelLowering.cpp

@@ -1608,7 +1608,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
     }

     if (AMDGPU::isShader(CallConv)) {
-      unsigned Reg = MF.addLiveIn(VA.getLocReg(), &R600::R600_Reg128RegClass);
+      Register Reg = MF.addLiveIn(VA.getLocReg(), &R600::R600_Reg128RegClass);
       SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
       InVals.push_back(Register);
       continue;

llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp

@@ -769,7 +769,7 @@ void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
   Visited.insert(&MI);
   while (!worklist.empty()) {
     const MachineInstr *Instr = worklist.pop_back_val();
-    unsigned Reg = Instr->getOperand(0).getReg();
+    Register Reg = Instr->getOperand(0).getReg();
     for (const auto &Use : MRI->use_operands(Reg)) {
       const MachineInstr *UseMI = Use.getParent();
       AllAGPRUses &= (UseMI->isCopy() &&
@@ -818,11 +818,11 @@ void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
   bool hasVGPRInput = false;
   for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
-    unsigned InputReg = MI.getOperand(i).getReg();
+    Register InputReg = MI.getOperand(i).getReg();
     MachineInstr *Def = MRI->getVRegDef(InputReg);
     if (TRI->isVectorRegister(*MRI, InputReg)) {
       if (Def->isCopy()) {
-        unsigned SrcReg = Def->getOperand(1).getReg();
+        Register SrcReg = Def->getOperand(1).getReg();
         const TargetRegisterClass *RC =
             TRI->getRegClassForReg(*MRI, SrcReg);
         if (TRI->isSGPRClass(RC))

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

@@ -1926,26 +1926,26 @@ void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
                                             const SIRegisterInfo &TRI,
                                             SIMachineFunctionInfo &Info) const {
   if (Info.hasImplicitBufferPtr()) {
-    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
+    Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
     CCInfo.AllocateReg(ImplicitBufferPtrReg);
   }

   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
   if (Info.hasPrivateSegmentBuffer()) {
-    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
+    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
     CCInfo.AllocateReg(PrivateSegmentBufferReg);
   }

   if (Info.hasDispatchPtr()) {
-    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
+    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
     CCInfo.AllocateReg(DispatchPtrReg);
   }

   if (Info.hasQueuePtr()) {
-    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
+    Register QueuePtrReg = Info.addQueuePtr(TRI);
     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
     CCInfo.AllocateReg(QueuePtrReg);
   }
@@ -1960,13 +1960,13 @@ void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
   }

   if (Info.hasDispatchID()) {
-    unsigned DispatchIDReg = Info.addDispatchID(TRI);
+    Register DispatchIDReg = Info.addDispatchID(TRI);
     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
     CCInfo.AllocateReg(DispatchIDReg);
   }

   if (Info.hasFlatScratchInit()) {
-    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
+    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
     CCInfo.AllocateReg(FlatScratchInitReg);
   }
@@ -1982,25 +1982,25 @@ void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
                                            CallingConv::ID CallConv,
                                            bool IsShader) const {
   if (Info.hasWorkGroupIDX()) {
-    unsigned Reg = Info.addWorkGroupIDX();
+    Register Reg = Info.addWorkGroupIDX();
     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
     CCInfo.AllocateReg(Reg);
   }

   if (Info.hasWorkGroupIDY()) {
-    unsigned Reg = Info.addWorkGroupIDY();
+    Register Reg = Info.addWorkGroupIDY();
     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
     CCInfo.AllocateReg(Reg);
   }

   if (Info.hasWorkGroupIDZ()) {
-    unsigned Reg = Info.addWorkGroupIDZ();
+    Register Reg = Info.addWorkGroupIDZ();
     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
     CCInfo.AllocateReg(Reg);
   }

   if (Info.hasWorkGroupInfo()) {
-    unsigned Reg = Info.addWorkGroupInfo();
+    Register Reg = Info.addWorkGroupInfo();
     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
     CCInfo.AllocateReg(Reg);
   }
@@ -5016,7 +5016,7 @@ SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

   // Get the return address reg and mark it as an implicit live-in
-  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent()));
+  Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent()));

   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
 }
@@ -5112,7 +5112,7 @@ SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
-  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
+  Register UserSGPR = Info->getQueuePtrUserSGPR();
   assert(UserSGPR != AMDGPU::NoRegister);
   SDValue QueuePtr = CreateLiveInRegister(
       DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

llvm/lib/Target/AMDGPU/SIInstrInfo.cpp

@@ -2131,7 +2131,7 @@ unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
   // buzz;

   RS->enterBasicBlockEnd(MBB);
-  unsigned Scav = RS->scavengeRegisterBackwards(
+  Register Scav = RS->scavengeRegisterBackwards(
       AMDGPU::SReg_64RegClass,
       MachineBasicBlock::iterator(GetPC), false, 0);
   MRI.replaceRegWith(PCReg, Scav);
@@ -4564,7 +4564,7 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
   int ConstantBusLimit = ST.getConstantBusLimit(Opc);
   int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
   SmallDenseSet<unsigned> SGPRsUsed;
-  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
+  Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
   if (SGPRReg != AMDGPU::NoRegister) {
     SGPRsUsed.insert(SGPRReg);
     --ConstantBusLimit;
@@ -4668,12 +4668,12 @@ void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
   // pointer value is uniform.
   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
-    unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
+    Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
     SBase->setReg(SGPR);
   }

   MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
   if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
-    unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
+    Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
     SOff->setReg(SGPR);
   }
 }
@@ -6317,7 +6317,7 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
   if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
     if (MI.isCopy()) {
       MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
-      unsigned DestReg = MI.getOperand(0).getReg();
+      Register DestReg = MI.getOperand(0).getReg();

       for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
         if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||

llvm/lib/Target/AMDGPU/SIInstrInfo.h

@@ -677,7 +677,7 @@ public:
   bool isVGPRCopy(const MachineInstr &MI) const {
     assert(MI.isCopy());
-    unsigned Dest = MI.getOperand(0).getReg();
+    Register Dest = MI.getOperand(0).getReg();
     const MachineFunction &MF = *MI.getParent()->getParent();
     const MachineRegisterInfo &MRI = MF.getRegInfo();
     return !RI.isSGPRReg(MRI, Dest);

llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp

@@ -504,7 +504,7 @@ SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
 void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
   MachineBasicBlock &MBB = *MI.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-  unsigned CFMask = MI.getOperand(0).getReg();
+  Register CFMask = MI.getOperand(0).getReg();
   MachineInstr *Def = MRI.getUniqueVRegDef(CFMask);
   const DebugLoc &DL = MI.getDebugLoc();

llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp

@@ -657,7 +657,7 @@ void SILowerI1Copies::lowerPhis() {
       }
     }

-    unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
+    Register NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
     if (NewReg != DstReg) {
       MRI->replaceRegWith(NewReg, DstReg);
       MI->eraseFromParent();

llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp

@@ -322,7 +322,7 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

-  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
+  Register ScratchRSrcReg = MFI->getScratchRSrcReg();
   if (ScratchRSrcReg != AMDGPU::NoRegister) {
     // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
     // to spill.
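
A change like this is usually produced mechanically rather than by hand: given a build tree with a compile_commands.json, an invocation along the lines of run-clang-tidy -checks='-*,llvm-prefer-register-over-unsigned' -fix llvm/lib/Target/AMDGPU should reproduce the rewrite (the exact invocation is illustrative, not taken from the commit).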