
Commit c87c1c0 (parent: 68fabe0)

Revert "[SROA] Propagate !range metadata when moving loads."
Revert "[Mem2Reg] Clang-format unformatted parts of this file. NFCI."

This reverts commit r319096 and r319097.
Davide says they broke a bot.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319131 91177308-0d34-0410-b5e6-96231b3b80d8

File tree: 4 files changed (+34, -185 lines)


lib/Transforms/Scalar/SROA.cpp

Lines changed: 8 additions & 3 deletions
@@ -2455,10 +2455,15 @@ class llvm::sroa::AllocaSliceRewriter
       // are different types, for example by mapping !nonnull metadata to
       // !range metadata by modeling the null pointer constant converted to the
       // integer type.
+      // FIXME: Add support for range metadata here. Currently the utilities
+      // for this don't propagate range metadata in trivial cases from one
+      // integer load to another, don't handle non-addrspace-0 null pointers
+      // correctly, and don't have any support for mapping ranges as the
+      // integer type becomes winder or narrower.
       if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
         copyNonnullMetadata(LI, N, *NewLI);
-      if (MDNode *N = LI.getMetadata(LLVMContext::MD_range))
-        copyRangeMetadata(DL, LI, N, *NewLI);
+
+      // Try to preserve nonnull metadata
       V = NewLI;
 
       // If this is an integer load past the end of the slice (which means the
@@ -3649,7 +3654,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
                 PartPtrTy, BasePtr->getName() + "."),
             getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
             LI->getName());
-      PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
+        PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
 
       // Append this load onto the list of split loads so we can find it later
       // to rewrite the stores.

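For readers unfamiliar with the metadata involved, here is a minimal LLVM IR sketch of what the hunk above restores (illustrative only; the function names and metadata values are made up, not taken from this commit): when SROA rewrites a load out of an alloca slice, copyNonnullMetadata still carries !nonnull over to the replacement load, while a !range annotation is now dropped rather than copied via copyRangeMetadata.

define i8* @nonnull_is_kept(i8** %p) {
entry:
  %v = load i8*, i8** %p, !nonnull !0   ; !nonnull still survives the rewrite
  ret i8* %v
}

define i64 @range_is_dropped(i64* %p) {
entry:
  %v = load i64, i64* %p, !range !1     ; !range is no longer propagated by SROA
  ret i64 %v
}

!0 = !{}
!1 = !{i64 0, i64 2}
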
lib/Transforms/Utils/Local.cpp

Lines changed: 9 additions & 15 deletions
@@ -1947,24 +1947,18 @@ void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                              MDNode *N, LoadInst &NewLI) {
   auto *NewTy = NewLI.getType();
-  auto *OldTy = OldLI.getType();
 
-  if (DL.getTypeStoreSizeInBits(NewTy) == DL.getTypeSizeInBits(OldTy) &&
-      NewTy->isIntegerTy()) {
-    // An integer with the same number of bits - give it the range
-    // metadata!.
-    NewLI.setMetadata(LLVMContext::MD_range, N);
+  // Give up unless it is converted to a pointer where there is a single very
+  // valuable mapping we can do reliably.
+  // FIXME: It would be nice to propagate this in more ways, but the type
+  // conversions make it hard.
+  if (!NewTy->isPointerTy())
     return;
-  }
 
-  if (NewTy->isPointerTy()) {
-    // Try to convert the !range metadata to !nonnull metadata on the
-    // new pointer.
-    unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
-    if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
-      MDNode *NN = MDNode::get(OldLI.getContext(), None);
-      NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
-    }
+  unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
+  if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
+    MDNode *NN = MDNode::get(OldLI.getContext(), None);
+    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
   }
 }
 

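As a rough illustration of the one mapping copyRangeMetadata still performs after this revert (a sketch with made-up function names and range values, not from the commit): when the rewritten load has pointer type and the original integer load's !range excludes zero, the fact is preserved as !nonnull on the new load.

; Before rewriting: an integer load whose !range [1, 100) cannot be zero.
define i64 @old_load(i64* %p) {
  %v = load i64, i64* %p, !range !0
  ret i64 %v
}

; After rewriting to a pointer load: the range is translated into !nonnull.
define i8* @new_load(i8** %p) {
  %v = load i8*, i8** %p, !nonnull !1
  ret i8* %v
}

!0 = !{i64 1, i64 100}
!1 = !{}
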
lib/Transforms/Utils/PromoteMemoryToRegister.cpp

Lines changed: 17 additions & 147 deletions
@@ -17,7 +17,6 @@
 
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -49,7 +48,7 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
 #include <algorithm>
-
+#include <cassert>
 #include <iterator>
 #include <utility>
 #include <vector>
@@ -178,16 +177,6 @@ class RenamePassData {
   ValVector Values;
 };
 
-/// \brief Semi-open interval of instructions that are guaranteed to
-/// all execute if the first one does.
-class GuaranteedExecutionRange {
-public:
-  unsigned Start;
-  unsigned End;
-
-  GuaranteedExecutionRange(unsigned S, unsigned E) : Start(S), End(E) {}
-};
-
 /// \brief This assigns and keeps a per-bb relative ordering of load/store
 /// instructions in the block that directly load or store an alloca.
 ///
@@ -201,108 +190,14 @@ class LargeBlockInfo {
   /// the block.
   DenseMap<const Instruction *, unsigned> InstNumbers;
 
-  /// \brief For each basic block we track, keep track of the intervals
-  /// of instruction numbers of instructions that transfer control
-  /// to their successors, for propagating metadata.
-  DenseMap<const BasicBlock *,
-           Optional<SmallVector<GuaranteedExecutionRange, 4>>>
-      GuaranteedExecutionIntervals;
-
 public:
-  /// This code looks for stores to allocas, and for loads both for
-  /// allocas and for transferring metadata.
+
+  /// This code only looks at accesses to allocas.
   static bool isInterestingInstruction(const Instruction *I) {
-    return isa<LoadInst>(I) ||
+    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
            (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
   }
 
-  /// Compute the GuaranteedExecutionIntervals for a given BB.
-  ///
-  /// This is valid and remains valid as long as each interesting
-  /// instruction (see isInterestingInstruction) that
-  ///   A) existed when this LBI was cleared
-  ///   B) has not been deleted (deleting interesting instructions is fine)
-  /// are run in the same program executions and in the same order
-  /// as when this LBI was cleared.
-  ///
-  /// Because `PromoteMemoryToRegister` does not move memory loads at
-  /// all, this assumption is satisfied in this pass.
-  SmallVector<GuaranteedExecutionRange, 4> computeGEI(const BasicBlock *BB) {
-    SmallVector<GuaranteedExecutionRange, 4> GuaranteedExecutionIntervals;
-
-    unsigned InstNo = 0;
-    bool InRange = false;
-    unsigned FirstInstInRange = 0;
-    for (const Instruction &BBI : *BB) {
-      if (isGuaranteedToTransferExecutionToSuccessor(&BBI)) {
-        if (!InRange && isInterestingInstruction(&BBI)) {
-          InRange = true;
-          FirstInstInRange = InstNo;
-        }
-      } else {
-        if (InRange) {
-          assert(FirstInstInRange < InstNo &&
-                 "Can't push an empty range here.");
-          GuaranteedExecutionIntervals.emplace_back(FirstInstInRange, InstNo);
-        }
-        InRange = false;
-      }
-
-      if (isInterestingInstruction(&BBI)) {
-        auto It = InstNumbers.find(&BBI);
-        assert(It != InstNumbers.end() && InstNo <= It->second &&
-               "missing number for interesting instruction");
-        InstNo = It->second + 1;
-      }
-    }
-
-    if (InRange) {
-      assert(FirstInstInRange < InstNo && "Can't push an empty range here.");
-      GuaranteedExecutionIntervals.emplace_back(FirstInstInRange, InstNo);
-    }
-
-    return GuaranteedExecutionIntervals;
-  }
-
-  /// Return true if, when CxtI executes, it is guaranteed that either
-  /// I had executed already or that I is guaranteed to be later executed.
-  ///
-  /// The useful property this guarantees is that if I exhibits undefined
-  /// behavior under some circumstances, then the whole program will exhibit
-  /// undefined behavior at CxtI.
-  bool isGuaranteedToBeExecuted(const Instruction *CxtI, const Instruction *I) {
-    const BasicBlock *BB = CxtI->getParent();
-
-    if (BB != I->getParent()) {
-      // Instructions in different basic blocks, so control flow
-      // can diverge between them (we could track this with
-      // postdoms, but we don't bother).
-      return false;
-    }
-
-    unsigned Index1 = getInstructionIndex(CxtI);
-    unsigned Index2 = getInstructionIndex(I);
-
-    auto &BBGEI = GuaranteedExecutionIntervals[BB];
-    if (!BBGEI.hasValue()) {
-      BBGEI.emplace(computeGEI(BB));
-    }
-
-    // We want to check whether I and CxtI are in the same range. To do that,
-    // we notice that CxtI can only be in the first range R where
-    // CxtI.end < R.end. If we find that range using binary search,
-    // we can check whether I and CxtI are both in it.
-    GuaranteedExecutionRange Bound(Index1, Index1);
-    auto R = std::upper_bound(
-        BBGEI->begin(), BBGEI->end(), Bound,
-        [](GuaranteedExecutionRange I_, GuaranteedExecutionRange R) {
-          return I_.End < R.End;
-        });
-
-    return R != BBGEI->end() && R->Start <= Index1 && Index1 < R->End &&
-           R->Start <= Index2 && Index2 < R->End;
-  }
-
   /// Get or calculate the index of the specified instruction.
   unsigned getInstructionIndex(const Instruction *I) {
     assert(isInterestingInstruction(I) &&
@@ -318,11 +213,9 @@ class LargeBlockInfo {
     // avoid gratuitus rescans.
     const BasicBlock *BB = I->getParent();
     unsigned InstNo = 0;
-    GuaranteedExecutionIntervals.erase(BB);
     for (const Instruction &BBI : *BB)
       if (isInterestingInstruction(&BBI))
         InstNumbers[&BBI] = InstNo++;
-
     It = InstNumbers.find(I);
 
     assert(It != InstNumbers.end() && "Didn't insert instruction?");
@@ -331,10 +224,7 @@ class LargeBlockInfo {
 
   void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
 
-  void clear() {
-    InstNumbers.clear();
-    GuaranteedExecutionIntervals.clear();
-  }
+  void clear() { InstNumbers.clear(); }
 };
 
 struct PromoteMem2Reg {
@@ -412,7 +302,7 @@ struct PromoteMem2Reg {
                      const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                      SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
   void RenamePass(BasicBlock *BB, BasicBlock *Pred,
-                  RenamePassData::ValVector &IncVals, LargeBlockInfo &LBI,
+                  RenamePassData::ValVector &IncVals,
                   std::vector<RenamePassData> &Worklist);
   bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
 };
@@ -431,29 +321,6 @@ static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
   AC->registerAssumption(CI);
 }
 
-static void addAssumptionsFromMetadata(LoadInst *LI, Value *ReplVal,
-                                       DominatorTree &DT, const DataLayout &DL,
-                                       LargeBlockInfo &LBI,
-                                       AssumptionCache *AC) {
-  if (LI->getMetadata(LLVMContext::MD_nonnull) &&
-      !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT)) {
-    addAssumeNonNull(AC, LI);
-  }
-
-  if (auto *N = LI->getMetadata(LLVMContext::MD_range)) {
-    // Range metadata is harder to use as an assumption,
-    // so don't try to add one, but *do* try to copy
-    // the metadata to a load in the same BB.
-    if (LoadInst *NewLI = dyn_cast<LoadInst>(ReplVal)) {
-      DEBUG(dbgs() << "trying to move !range metadata from" << *LI << " to"
-                   << *NewLI << "\n");
-      if (LBI.isGuaranteedToBeExecuted(LI, NewLI)) {
-        copyRangeMetadata(DL, *LI, N, *NewLI);
-      }
-    }
-  }
-}
-
 static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
   // Knowing that this alloca is promotable, we know that it's safe to kill all
   // instructions except for load and store.
@@ -542,7 +409,9 @@ static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
       // If the load was marked as nonnull we don't want to lose
       // that information when we erase this Load. So we preserve
       // it with an assume.
-      addAssumptionsFromMetadata(LI, ReplVal, DT, DL, LBI, AC);
+      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
+          !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
+        addAssumeNonNull(AC, LI);
 
       LI->replaceAllUsesWith(ReplVal);
       LI->eraseFromParent();
@@ -636,7 +505,9 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
       // Note, if the load was marked as nonnull we don't want to lose that
       // information when we erase it. So we preserve it with an assume.
       Value *ReplVal = std::prev(I)->second->getOperand(0);
-      addAssumptionsFromMetadata(LI, ReplVal, DT, DL, LBI, AC);
+      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
+          !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
+        addAssumeNonNull(AC, LI);
 
       LI->replaceAllUsesWith(ReplVal);
     }
@@ -772,6 +643,7 @@ void PromoteMem2Reg::run() {
 
   if (Allocas.empty())
     return; // All of the allocas must have been trivial!
+
   LBI.clear();
 
   // Set the incoming values for the basic block to be null values for all of
@@ -789,10 +661,9 @@ void PromoteMem2Reg::run() {
     RenamePassData RPD = std::move(RenamePassWorkList.back());
     RenamePassWorkList.pop_back();
     // RenamePass may add new worklist entries.
-    RenamePass(RPD.BB, RPD.Pred, RPD.Values, LBI, RenamePassWorkList);
+    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RenamePassWorkList);
   } while (!RenamePassWorkList.empty());
 
-  LBI.clear();
   // The renamer uses the Visited set to avoid infinite loops. Clear it now.
   Visited.clear();
 
@@ -1004,7 +875,6 @@ bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
 /// predecessor block Pred.
 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                 RenamePassData::ValVector &IncomingVals,
-                                LargeBlockInfo &LBI,
                                 std::vector<RenamePassData> &Worklist) {
 NextIteration:
   // If we are inserting any phi nodes into this BB, they will already be in the
@@ -1071,12 +941,13 @@ void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
         // If the load was marked as nonnull we don't want to lose
         // that information when we erase this Load. So we preserve
        // it with an assume.
-        addAssumptionsFromMetadata(LI, V, DT, SQ.DL, LBI, AC);
+        if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
+            !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
+          addAssumeNonNull(AC, LI);
 
         // Anything using the load now uses the current value.
         LI->replaceAllUsesWith(V);
         BB->getInstList().erase(LI);
-        LBI.deleteValue(LI);
       } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
         // Delete this instruction and mark the name as the current holder of the
         // value
@@ -1094,7 +965,6 @@ void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
       for (DbgInfoIntrinsic *DII : AllocaDbgDeclares[ai->second])
         ConvertDebugDeclareToDebugValue(DII, SI, DIB);
       BB->getInstList().erase(SI);
-      LBI.deleteValue(SI);
     }
   }
 

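For context on the addAssumeNonNull calls that replace addAssumptionsFromMetadata above, here is a minimal LLVM IR sketch (assumed shape and names, not taken from the commit) of how mem2reg preserves a !nonnull fact once the load itself is erased: the replacement value is guarded by an llvm.assume on an icmp against null.

declare void @llvm.assume(i1)

define i8* @promoted(i8* %incoming) {
entry:
  ; The promoted load carried !nonnull; its uses now see %incoming directly,
  ; so the fact is kept as an assumption rather than as load metadata.
  %nonnull = icmp ne i8* %incoming, null
  call void @llvm.assume(i1 %nonnull)
  ret i8* %incoming
}
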
test/Transforms/SROA/preserve-nonnull.ll

Lines changed: 0 additions & 20 deletions
@@ -3,8 +3,6 @@
 ; Make sure that SROA doesn't lose nonnull metadata
 ; on loads from allocas that get optimized out.
 
-%pair = type { i64, [0 x i64], [1 x i64] }
-
 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
 
 ; Check that we do basic propagation of nonnull when rewriting.
@@ -44,23 +42,6 @@ entry:
   ret float* %ret
 }
 
-; Make sure we propagate the !range attribute when we expand loads.
-define i64 @propagate_range(%pair* dereferenceable(16)) {
-; CHECK-LABEL: define i64 @propagate_range(
-; CHECK-NEXT:  start:
-; CHECK-NEXT:    %[[SROA_IDX:.*]] = getelementptr inbounds %pair
-; CHECK-NEXT:    %[[RESULT:.*]] = load i64, i64* %[[SROA_IDX]], align 8, !range !1
-; CHECK:         ret i64 %[[RESULT]]
-start:
-  %a = alloca %pair
-  %1 = bitcast %pair* %0 to i8*
-  %2 = bitcast %pair* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %1, i64 16, i32 8, i1 false)
-  %3 = getelementptr inbounds %pair, %pair* %a, i32 0, i32 0
-  %4 = load i64, i64* %3, !range !1
-  ret i64 %4
-}
-
 ; Make sure we properly handle the !nonnull attribute when we convert
 ; a pointer load to an integer load.
 ; FIXME: While this doesn't do anythnig actively harmful today, it really
@@ -109,4 +90,3 @@ entry:
 }
 
 !0 = !{}
-!1 = !{i64 0, i64 2}
