21
21
#include " clang/AST/GlobalDecl.h"
22
22
#include " clang/Basic/Builtins.h"
23
23
#include " clang/CIR/Dialect/IR/CIRDialect.h"
24
+ #include " clang/CIR/Dialect/IR/CIROpsEnums.h"
24
25
#include " clang/CIR/Dialect/IR/CIRTypes.h"
25
26
#include " llvm/Support/Casting.h"
26
27
#include " llvm/Support/ErrorHandling.h"
@@ -126,6 +127,7 @@ static Address buildPointerWithAlignment(const Expr *E,
126
127
if (PtrTy->getPointeeType ()->isVoidType ())
127
128
break ;
128
129
assert (!UnimplementedFeature::tbaa ());
130
+
129
131
LValueBaseInfo InnerBaseInfo;
130
132
Address Addr = CGF.buildPointerWithAlignment (
131
133
CE->getSubExpr (), &InnerBaseInfo, IsKnownNonNull);
@@ -209,13 +211,79 @@ static Address buildPointerWithAlignment(const Expr *E,
209
211
return Address (CGF.buildScalarExpr (E), Align);
210
212
}
211
213
214
+ // / Helper method to check if the underlying ABI is AAPCS
215
+ static bool isAAPCS (const TargetInfo &TargetInfo) {
216
+ return TargetInfo.getABI ().startswith (" aapcs" );
217
+ }
218
+
219
+ Address CIRGenFunction::getAddrOfBitFieldStorage (LValue base,
220
+ const FieldDecl *field,
221
+ unsigned index,
222
+ unsigned size) {
223
+ if (index == 0 )
224
+ return base.getAddress ();
225
+
226
+ auto loc = getLoc (field->getLocation ());
227
+ auto fieldType = builder.getUIntNTy (size);
228
+
229
+ auto fieldPtr =
230
+ mlir::cir::PointerType::get (getBuilder ().getContext (), fieldType);
231
+ auto sea = getBuilder ().createGetMember (
232
+ loc, fieldPtr, base.getPointer (), field->getName (), index );
233
+
234
+ return Address (sea, CharUnits::One ());
235
+ }
236
+
237
+ static bool useVolatileForBitField (const CIRGenModule &cgm, LValue base,
238
+ const CIRGenBitFieldInfo &info,
239
+ const FieldDecl *field) {
240
+ return isAAPCS (cgm.getTarget ()) && cgm.getCodeGenOpts ().AAPCSBitfieldWidth &&
241
+ info.VolatileStorageSize != 0 &&
242
+ field->getType ()
243
+ .withCVRQualifiers (base.getVRQualifiers ())
244
+ .isVolatileQualified ();
245
+ }
246
+
247
+ LValue CIRGenFunction::buildLValueForBitField (LValue base,
248
+ const FieldDecl *field) {
249
+
250
+ LValueBaseInfo BaseInfo = base.getBaseInfo ();
251
+ const RecordDecl *rec = field->getParent ();
252
+ auto &layout = CGM.getTypes ().getCIRGenRecordLayout (field->getParent ());
253
+ auto &info = layout.getBitFieldInfo (field);
254
+ auto useVolatile = useVolatileForBitField (CGM, base, info, field);
255
+ unsigned Idx = layout.getCIRFieldNo (field);
256
+
257
+ if (useVolatile ||
258
+ (IsInPreservedAIRegion ||
259
+ (getDebugInfo () && rec->hasAttr <BPFPreserveAccessIndexAttr>()))) {
260
+ llvm_unreachable (" NYI" );
261
+ }
262
+
263
+ const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize ;
264
+ Address Addr = getAddrOfBitFieldStorage (base, field, Idx, SS);
265
+
266
+ // Get the access type.
267
+ mlir::Type FieldIntTy = builder.getUIntNTy (SS);
268
+
269
+ auto loc = getLoc (field->getLocation ());
270
+ if (Addr.getElementType () != FieldIntTy)
271
+ Addr = builder.createElementBitCast (loc, Addr, FieldIntTy);
272
+
273
+ QualType fieldType =
274
+ field->getType ().withCVRQualifiers (base.getVRQualifiers ());
275
+
276
+ assert (!UnimplementedFeature::tbaa () && " NYI TBAA for bit fields" );
277
+ LValueBaseInfo FieldBaseInfo (BaseInfo.getAlignmentSource ());
278
+ return LValue::MakeBitfield (Addr, info, fieldType, FieldBaseInfo);
279
+ }
280
+
212
281
LValue CIRGenFunction::buildLValueForField (LValue base,
213
282
const FieldDecl *field) {
214
283
LValueBaseInfo BaseInfo = base.getBaseInfo ();
215
284
216
- if (field->isBitField ()) {
217
- llvm_unreachable (" NYI" );
218
- }
285
+ if (field->isBitField ())
286
+ return buildLValueForBitField (base, field);
219
287
220
288
// Fields of may-alias structures are may-alais themselves.
221
289
// FIXME: this hould get propagated down through anonymous structs and unions.
@@ -520,12 +588,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
520
588
// / method emits the address of the lvalue, then loads the result as an rvalue,
521
589
// / returning the rvalue.
522
590
RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
  assert(!LV.getType()->isFunctionType());
  assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");

  // Bit-fields need the shift/mask load path.
  if (LV.isBitField())
    return buildLoadOfBitfieldLValue(LV, Loc);

  // Anything that is neither a bit-field nor a simple scalar lvalue
  // (vector element, ext-vector, matrix, ...) is not implemented yet.
  if (!LV.isSimple())
    llvm_unreachable("NYI");
  return RValue::get(buildLoadOfScalar(LV, Loc));
}
601
+
602
+ RValue CIRGenFunction::buildLoadOfBitfieldLValue (LValue LV,
603
+ SourceLocation Loc) {
604
+ const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo ();
605
+
606
+ // Get the output type.
607
+ mlir::Type ResLTy = convertType (LV.getType ());
608
+ Address Ptr = LV.getBitFieldAddress ();
609
+ mlir::Value Val = builder.createLoad (getLoc (Loc), Ptr );
610
+ auto ValWidth = Val.getType ().cast <IntType>().getWidth ();
611
+
612
+ bool UseVolatile = LV.isVolatileQualified () &&
613
+ Info.VolatileStorageSize != 0 && isAAPCS (CGM.getTarget ());
614
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset ;
615
+ const unsigned StorageSize =
616
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize ;
617
+
618
+ if (Info.IsSigned ) {
619
+ assert (static_cast <unsigned >(Offset + Info.Size ) <= StorageSize);
620
+
621
+ mlir::Type typ = builder.getSIntNTy (ValWidth);
622
+ Val = builder.createIntCast (Val, typ);
623
+
624
+ unsigned HighBits = StorageSize - Offset - Info.Size ;
625
+ if (HighBits)
626
+ Val = builder.createShiftLeft (Val, HighBits);
627
+ if (Offset + HighBits)
628
+ Val = builder.createShiftRight (Val, Offset + HighBits);
629
+ } else {
630
+ if (Offset)
631
+ Val = builder.createShiftRight (Val, Offset);
632
+
633
+ if (static_cast <unsigned >(Offset) + Info.Size < StorageSize)
634
+ Val = builder.createAnd (Val,
635
+ llvm::APInt::getLowBitsSet (ValWidth, Info.Size ));
636
+ }
637
+ Val = builder.createIntCast (Val, ResLTy);
638
+ assert (!UnimplementedFeature::emitScalarRangeCheck () && " NYI" );
639
+ return RValue::get (Val);
529
640
}
530
641
531
642
void CIRGenFunction::buildStoreThroughLValue (RValue Src, LValue Dst) {
@@ -548,6 +659,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
548
659
buildStoreOfScalar (Src.getScalarVal (), Dst);
549
660
}
550
661
662
+ void CIRGenFunction::buildStoreThroughBitfieldLValue (RValue Src, LValue Dst,
663
+ mlir::Value &Result) {
664
+ const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo ();
665
+ mlir::Type ResLTy = getTypes ().convertTypeForMem (Dst.getType ());
666
+ Address Ptr = Dst.getBitFieldAddress ();
667
+
668
+ // Get the source value, truncated to the width of the bit-field.
669
+ mlir::Value SrcVal = Src.getScalarVal ();
670
+
671
+ // Cast the source to the storage type and shift it into place.
672
+ SrcVal = builder.createIntCast (SrcVal, Ptr .getElementType ());
673
+ auto SrcWidth = SrcVal.getType ().cast <IntType>().getWidth ();
674
+ mlir::Value MaskedVal = SrcVal;
675
+
676
+ const bool UseVolatile =
677
+ CGM.getCodeGenOpts ().AAPCSBitfieldWidth && Dst.isVolatileQualified () &&
678
+ Info.VolatileStorageSize != 0 && isAAPCS (CGM.getTarget ());
679
+ const unsigned StorageSize =
680
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize ;
681
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset ;
682
+ // See if there are other bits in the bitfield's storage we'll need to load
683
+ // and mask together with source before storing.
684
+ if (StorageSize != Info.Size ) {
685
+ assert (StorageSize > Info.Size && " Invalid bitfield size." );
686
+
687
+ mlir::Value Val = buildLoadOfScalar (Dst, Dst.getPointer ().getLoc ());
688
+
689
+ // Mask the source value as needed.
690
+ if (!hasBooleanRepresentation (Dst.getType ()))
691
+ SrcVal = builder.createAnd (
692
+ SrcVal, llvm::APInt::getLowBitsSet (SrcWidth, Info.Size ));
693
+
694
+ MaskedVal = SrcVal;
695
+ if (Offset)
696
+ SrcVal = builder.createShiftLeft (SrcVal, Offset);
697
+
698
+ // Mask out the original value.
699
+ Val = builder.createAnd (
700
+ Val, ~llvm::APInt::getBitsSet (SrcWidth, Offset, Offset + Info.Size ));
701
+
702
+ // Or together the unchanged values and the source value.
703
+ SrcVal = builder.createOr (Val, SrcVal);
704
+
705
+ } else {
706
+ // According to the AACPS:
707
+ // When a volatile bit-field is written, and its container does not overlap
708
+ // with any non-bit-field member, its container must be read exactly once
709
+ // and written exactly once using the access width appropriate to the type
710
+ // of the container. The two accesses are not atomic.
711
+ if (Dst.isVolatileQualified () && isAAPCS (CGM.getTarget ()) &&
712
+ CGM.getCodeGenOpts ().ForceAAPCSBitfieldLoad )
713
+ llvm_unreachable (" volatile bit-field is not implemented for the AACPS" );
714
+ }
715
+
716
+ // Write the new value back out.
717
+ // TODO: constant matrix type, volatile, no init, non temporal, TBAA
718
+ buildStoreOfScalar (SrcVal, Ptr , Dst.isVolatileQualified (), Dst.getType (),
719
+ Dst.getBaseInfo (), false , false );
720
+
721
+ // Return the new value of the bit-field.
722
+ mlir::Value ResultVal = MaskedVal;
723
+ ResultVal = builder.createIntCast (ResultVal, ResLTy);
724
+
725
+ // Sign extend the value if needed.
726
+ if (Info.IsSigned ) {
727
+ assert (Info.Size <= StorageSize);
728
+ unsigned HighBits = StorageSize - Info.Size ;
729
+
730
+ if (HighBits) {
731
+ ResultVal = builder.createShiftLeft (ResultVal, HighBits);
732
+ ResultVal = builder.createShiftRight (ResultVal, HighBits);
733
+ }
734
+ }
735
+
736
+ Result = buildFromMemory (ResultVal, Dst.getType ());
737
+ }
738
+
551
739
static LValue buildGlobalVarDeclLValue (CIRGenFunction &CGF, const Expr *E,
552
740
const VarDecl *VD) {
553
741
QualType T = E->getType ();
@@ -771,7 +959,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
771
959
LValue LV = buildLValue (E->getLHS ());
772
960
773
961
SourceLocRAIIObject Loc{*this , getLoc (E->getSourceRange ())};
774
- buildStoreThroughLValue (RV, LV);
962
+ if (LV.isBitField ()) {
963
+ mlir::Value result;
964
+ buildStoreThroughBitfieldLValue (RV, LV, result);
965
+ } else {
966
+ buildStoreThroughLValue (RV, LV);
967
+ }
968
+
775
969
assert (!getContext ().getLangOpts ().OpenMP &&
776
970
" last priv cond not implemented" );
777
971
return LV;
@@ -2205,6 +2399,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
2205
2399
2206
2400
/// Load a scalar from \p lvalue, translating the clang source location to an
/// MLIR location and delegating to the mlir::Location overload.
mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                              SourceLocation Loc) {
  auto mlirLoc = getLoc(Loc);
  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                           lvalue.getType(), mlirLoc, lvalue.getBaseInfo(),
                           lvalue.isNontemporal());
}
2406
+
2407
+ mlir::Value CIRGenFunction::buildLoadOfScalar (LValue lvalue,
2408
+ mlir::Location Loc) {
2208
2409
return buildLoadOfScalar (lvalue.getAddress (), lvalue.isVolatile (),
2209
2410
lvalue.getType (), Loc, lvalue.getBaseInfo (),
2210
2411
lvalue.isNontemporal ());
@@ -2222,6 +2423,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2222
2423
QualType Ty, SourceLocation Loc,
2223
2424
LValueBaseInfo BaseInfo,
2224
2425
bool isNontemporal) {
2426
+ return buildLoadOfScalar (Addr, Volatile, Ty, getLoc (Loc), BaseInfo,
2427
+ isNontemporal);
2428
+ }
2429
+
2430
+ mlir::Value CIRGenFunction::buildLoadOfScalar (Address Addr, bool Volatile,
2431
+ QualType Ty, mlir::Location Loc,
2432
+ LValueBaseInfo BaseInfo,
2433
+ bool isNontemporal) {
2225
2434
if (!CGM.getCodeGenOpts ().PreserveVec3Type ) {
2226
2435
if (Ty->isVectorType ()) {
2227
2436
llvm_unreachable (" NYI" );
@@ -2235,15 +2444,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2235
2444
}
2236
2445
2237
2446
mlir::cir::LoadOp Load = builder.create <mlir::cir::LoadOp>(
2238
- getLoc ( Loc) , Addr.getElementType (), Addr.getPointer ());
2447
+ Loc, Addr.getElementType (), Addr.getPointer ());
2239
2448
2240
2449
if (isNontemporal) {
2241
2450
llvm_unreachable (" NYI" );
2242
2451
}
2243
-
2244
- // TODO: TBAA
2245
-
2246
- // TODO: buildScalarRangeCheck
2452
+
2453
+ assert (!UnimplementedFeature::tbaa () && " NYI" );
2454
+ assert (!UnimplementedFeature::emitScalarRangeCheck () && " NYI" );
2247
2455
2248
2456
return buildFromMemory (Load, Ty);
2249
2457
}
0 commit comments