21
21
#include " clang/AST/GlobalDecl.h"
22
22
#include " clang/Basic/Builtins.h"
23
23
#include " clang/CIR/Dialect/IR/CIRDialect.h"
24
+ #include " clang/CIR/Dialect/IR/CIROpsEnums.h"
24
25
#include " clang/CIR/Dialect/IR/CIRTypes.h"
25
26
#include " llvm/Support/Casting.h"
26
27
#include " llvm/Support/ErrorHandling.h"
@@ -126,6 +127,7 @@ static Address buildPointerWithAlignment(const Expr *E,
126
127
if (PtrTy->getPointeeType ()->isVoidType ())
127
128
break ;
128
129
assert (!UnimplementedFeature::tbaa ());
130
+
129
131
LValueBaseInfo InnerBaseInfo;
130
132
Address Addr = CGF.buildPointerWithAlignment (
131
133
CE->getSubExpr (), &InnerBaseInfo, IsKnownNonNull);
@@ -209,13 +211,78 @@ static Address buildPointerWithAlignment(const Expr *E,
209
211
return Address (CGF.buildScalarExpr (E), Align);
210
212
}
211
213
214
+ // / Helper method to check if the underlying ABI is AAPCS
215
+ static bool isAAPCS (const TargetInfo &TargetInfo) {
216
+ return TargetInfo.getABI ().startswith (" aapcs" );
217
+ }
218
+
219
/// Compute the address of field number `index` within the record lvalue
/// `base`, emitting a get-member operation for non-leading fields.
///
/// \param base  lvalue for the containing record.
/// \param field the field being addressed; its name, type and source
///              location feed the emitted operation.
/// \param index the CIR field number in the record layout.
/// \returns the field's address.
Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field,
                                       unsigned index) {
  // Field 0 shares the record's own address; no member op is needed.
  if (index == 0)
    return base.getAddress();

  auto loc = getLoc(field->getLocation());
  auto fieldType = convertType(field->getType());
  // Result type of the member access: pointer-to-field-type.
  auto fieldPtr =
      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
  auto sea = getBuilder().createGetMember(
      loc, fieldPtr, base.getPointer(), field->getName(), index);

  // NOTE(review): alignment is hard-coded to 1 byte rather than derived
  // from the record layout — presumably a placeholder; confirm callers do
  // not depend on a tighter alignment.
  return Address(sea, CharUnits::One());
}
233
+
234
+ static bool useVolatileForBitField (const CIRGenModule &cgm, LValue base,
235
+ const CIRGenBitFieldInfo &info,
236
+ const FieldDecl *field) {
237
+ return isAAPCS (cgm.getTarget ()) && cgm.getCodeGenOpts ().AAPCSBitfieldWidth &&
238
+ info.VolatileStorageSize != 0 &&
239
+ field->getType ()
240
+ .withCVRQualifiers (base.getVRQualifiers ())
241
+ .isVolatileQualified ();
242
+ }
243
+
244
/// Build an LValue describing a bit-field access to `field` of the record
/// lvalue `base`. The result carries the bit-field info from the CIR
/// record layout and an address whose element type is the storage unit
/// (an unsigned integer of the storage size in bits).
LValue CIRGenFunction::buildLValueForBitField(LValue base,
                                              const FieldDecl *field) {

  LValueBaseInfo BaseInfo = base.getBaseInfo();
  const RecordDecl *rec = field->getParent();
  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
  auto &info = layout.getBitFieldInfo(field);
  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
  unsigned Idx = layout.getCIRFieldNo(field);

  // AAPCS volatile bit-fields and preserved-access-index handling
  // (debug info + BPFPreserveAccessIndexAttr) are not implemented yet.
  if (useVolatile ||
      (IsInPreservedAIRegion ||
       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
    llvm_unreachable("NYI");
  }

  Address Addr = getAddrOfField(base, field, Idx);

  // Storage size in bits; the volatile variant would be selected for
  // AAPCS volatile bit-fields (currently unreachable above).
  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;

  // Get the access type.
  mlir::Type FieldIntTy = builder.getUIntNTy(SS);

  auto loc = getLoc(field->getLocation());
  // Reinterpret the storage as an integer of the access width when the
  // layout's element type differs.
  if (Addr.getElementType() != FieldIntTy)
    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);

  // The access inherits the CVR qualifiers of the containing lvalue.
  QualType fieldType =
      field->getType().withCVRQualifiers(base.getVRQualifiers());

  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
}
278
+
212
279
LValue CIRGenFunction::buildLValueForField (LValue base,
213
280
const FieldDecl *field) {
281
+
214
282
LValueBaseInfo BaseInfo = base.getBaseInfo ();
215
283
216
- if (field->isBitField ()) {
217
- llvm_unreachable (" NYI" );
218
- }
284
+ if (field->isBitField ())
285
+ return buildLValueForBitField (base, field);
219
286
220
287
// Fields of may-alias structures are may-alias themselves.
221
288
// FIXME: this should get propagated down through anonymous structs and unions.
@@ -518,12 +585,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
518
585
// / method emits the address of the lvalue, then loads the result as an rvalue,
519
586
// / returning the rvalue.
520
587
/// Emit the address of the lvalue and load its value, returning the
/// result as an rvalue. Only bit-field and simple lvalues are handled
/// so far; other kinds are not yet implemented.
RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
  assert(!LV.getType()->isFunctionType());
  assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");

  // Dispatch on the kind of lvalue being loaded.
  if (LV.isBitField())
    return buildLoadOfBitfieldLValue(LV, Loc);

  if (!LV.isSimple())
    llvm_unreachable("NYI");

  return RValue::get(buildLoadOfScalar(LV, Loc));
}
598
+
599
/// Load the value of the bit-field described by `LV`: read the whole
/// storage unit, extract the field's bits (sign-extending when the field
/// is signed), and return the result converted to the field's declared
/// type.
RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  mlir::Type ResLTy = convertType(LV.getType());
  // Load the entire storage unit the bit-field lives in.
  Address Ptr = LV.getBitFieldAddress();
  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
  auto ValWidth = Val.getType().cast<IntType>().getWidth();

  // AAPCS volatile bit-fields use the separate volatile layout fields.
  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;

  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);

    // Reinterpret the loaded unit as signed so the right shift below
    // is intended to sign-extend.
    mlir::Type typ = builder.getSIntNTy(ValWidth);
    Val = builder.createIntCast(Val, typ);

    // Shift the field up against the top of the unit, then back down:
    // this extracts the field and propagates its sign bit.
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = builder.createShiftLeft(Val, HighBits);
    if (Offset + HighBits)
      Val = builder.createShiftRight(Val, Offset + HighBits);
  } else {
    // Unsigned: shift the field down to bit 0 and mask off higher bits
    // when the field does not occupy the rest of the unit.
    if (Offset)
      Val = builder.createShiftRight(Val, Offset);

    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = builder.createAnd(Val,
                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
  }
  // Convert to the bit-field's declared type.
  Val = builder.createIntCast(Val, ResLTy);
  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
  return RValue::get(Val);
}
528
638
529
639
void CIRGenFunction::buildStoreThroughLValue (RValue Src, LValue Dst) {
@@ -546,6 +656,81 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
546
656
buildStoreOfScalar (Src.getScalarVal (), Dst);
547
657
}
548
658
659
+ void CIRGenFunction::buildStoreThroughBitfieldLValue (RValue Src, LValue Dst,
660
+ mlir::Value &Result) {
661
+ const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo ();
662
+ mlir::Type ResLTy = getTypes ().convertTypeForMem (Dst.getType ());
663
+ Address Ptr = Dst.getBitFieldAddress ();
664
+
665
+ // Get the source value, truncated to the width of the bit-field.
666
+ mlir::Value SrcVal = Src.getScalarVal ();
667
+
668
+ // Cast the source to the storage type and shift it into place.
669
+ SrcVal = builder.createIntCast (SrcVal, Ptr.getElementType ());
670
+ auto SrcWidth = SrcVal.getType ().cast <IntType>().getWidth ();
671
+ mlir::Value MaskedVal = SrcVal;
672
+
673
+ const bool UseVolatile =
674
+ CGM.getCodeGenOpts ().AAPCSBitfieldWidth && Dst.isVolatileQualified () &&
675
+ Info.VolatileStorageSize != 0 && isAAPCS (CGM.getTarget ());
676
+ const unsigned StorageSize =
677
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize ;
678
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset ;
679
+ // See if there are other bits in the bitfield's storage we'll need to load
680
+ // and mask together with source before storing.
681
+ if (StorageSize != Info.Size ) {
682
+ assert (StorageSize > Info.Size && " Invalid bitfield size." );
683
+
684
+ mlir::Value Val = buildLoadOfScalar (Dst, Dst.getPointer ().getLoc ());
685
+
686
+ // Mask the source value as needed.
687
+ if (!hasBooleanRepresentation (Dst.getType ()))
688
+ SrcVal = builder.createAnd (
689
+ SrcVal, llvm::APInt::getLowBitsSet (SrcWidth, Info.Size ));
690
+
691
+ MaskedVal = SrcVal;
692
+ if (Offset)
693
+ SrcVal = builder.createShiftLeft (SrcVal, Offset);
694
+
695
+ // Mask out the original value.
696
+ Val = builder.createAnd (
697
+ Val, ~llvm::APInt::getBitsSet (SrcWidth, Offset, Offset + Info.Size ));
698
+
699
+ // Or together the unchanged values and the source value.
700
+ SrcVal = builder.createOr (Val, SrcVal);
701
+
702
+ } else {
703
+ // According to the AACPS:
704
+ // When a volatile bit-field is written, and its container does not overlap
705
+ // with any non-bit-field member, its container must be read exactly once
706
+ // and written exactly once using the access width appropriate to the type
707
+ // of the container. The two accesses are not atomic.
708
+ llvm_unreachable (" volatile bit-field is not implemented for the AACPS" );
709
+ }
710
+
711
+ // Write the new value back out.
712
+ // TODO: constant matrix type, volatile, no init, non temporal, TBAA
713
+ buildStoreOfScalar (SrcVal, Ptr, Dst.isVolatileQualified (), Dst.getType (),
714
+ Dst.getBaseInfo (), false , false );
715
+
716
+ // Return the new value of the bit-field.
717
+ mlir::Value ResultVal = MaskedVal;
718
+ ResultVal = builder.createIntCast (ResultVal, ResLTy);
719
+
720
+ // Sign extend the value if needed.
721
+ if (Info.IsSigned ) {
722
+ assert (Info.Size <= StorageSize);
723
+ unsigned HighBits = StorageSize - Info.Size ;
724
+
725
+ if (HighBits) {
726
+ ResultVal = builder.createShiftLeft (ResultVal, HighBits);
727
+ ResultVal = builder.createShiftRight (ResultVal, HighBits);
728
+ }
729
+ }
730
+
731
+ Result = buildFromMemory (ResultVal, Dst.getType ());
732
+ }
733
+
549
734
static LValue buildGlobalVarDeclLValue (CIRGenFunction &CGF, const Expr *E,
550
735
const VarDecl *VD) {
551
736
QualType T = E->getType ();
@@ -769,7 +954,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
769
954
LValue LV = buildLValue (E->getLHS ());
770
955
771
956
SourceLocRAIIObject Loc{*this , getLoc (E->getSourceRange ())};
772
- buildStoreThroughLValue (RV, LV);
957
+ if (LV.isBitField ()) {
958
+ mlir::Value result;
959
+ buildStoreThroughBitfieldLValue (RV, LV, result);
960
+ } else {
961
+ buildStoreThroughLValue (RV, LV);
962
+ }
963
+
773
964
assert (!getContext ().getLangOpts ().OpenMP &&
774
965
" last priv cond not implemented" );
775
966
return LV;
@@ -2203,6 +2394,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
2203
2394
2204
2395
/// Convenience overload: load a scalar from `lvalue`, translating the
/// clang SourceLocation into an MLIR location before delegating to the
/// address-based implementation.
mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                              SourceLocation Loc) {
  auto mlirLoc = getLoc(Loc);
  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                           lvalue.getType(), mlirLoc, lvalue.getBaseInfo(),
                           lvalue.isNontemporal());
}
2401
+
2402
+ mlir::Value CIRGenFunction::buildLoadOfScalar (LValue lvalue,
2403
+ mlir::Location Loc) {
2206
2404
return buildLoadOfScalar (lvalue.getAddress (), lvalue.isVolatile (),
2207
2405
lvalue.getType (), Loc, lvalue.getBaseInfo (),
2208
2406
lvalue.isNontemporal ());
@@ -2220,6 +2418,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2220
2418
QualType Ty, SourceLocation Loc,
2221
2419
LValueBaseInfo BaseInfo,
2222
2420
bool isNontemporal) {
2421
+ return buildLoadOfScalar (Addr, Volatile, Ty, getLoc (Loc), BaseInfo,
2422
+ isNontemporal);
2423
+ }
2424
+
2425
+ mlir::Value CIRGenFunction::buildLoadOfScalar (Address Addr, bool Volatile,
2426
+ QualType Ty, mlir::Location Loc,
2427
+ LValueBaseInfo BaseInfo,
2428
+ bool isNontemporal) {
2223
2429
if (!CGM.getCodeGenOpts ().PreserveVec3Type ) {
2224
2430
if (Ty->isVectorType ()) {
2225
2431
llvm_unreachable (" NYI" );
@@ -2233,15 +2439,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2233
2439
}
2234
2440
2235
2441
mlir::cir::LoadOp Load = builder.create <mlir::cir::LoadOp>(
2236
- getLoc ( Loc) , Addr.getElementType (), Addr.getPointer ());
2442
+ Loc, Addr.getElementType (), Addr.getPointer ());
2237
2443
2238
2444
if (isNontemporal) {
2239
2445
llvm_unreachable (" NYI" );
2240
2446
}
2241
-
2242
- // TODO: TBAA
2243
-
2244
- // TODO: buildScalarRangeCheck
2447
+
2448
+ assert (!UnimplementedFeature::tbaa () && " NYI" );
2449
+ assert (!UnimplementedFeature::emitScalarRangeCheck () && " NYI" );
2245
2450
2246
2451
return buildFromMemory (Load, Ty);
2247
2452
}
0 commit comments