@@ -184,6 +184,19 @@ struct mlxsw_sp_acl_tcam_vgroup {
 struct mlxsw_sp_acl_tcam_rehash_ctx {
 	void *hints_priv;
 	bool this_is_rollback;
+	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
+							  * currently migrated.
+							  */
+	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
+							* migration from in
+							* a vchunk being
+							* currently migrated.
+							*/
+	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
+						       * migration at
+						       * a vchunk being
+						       * currently migrated.
+						       */
 };
 
 struct mlxsw_sp_acl_tcam_vregion {
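The three new fields form a resumable cursor for the rehash work: current_vchunk records which vchunk is mid-migration, start_ventry says where to pick up inside it on the next pass, and stop_ventry marks where a rollback pass must stop. Below is a minimal user-space sketch of that cursor idea; the struct and function names are illustrative assumptions, not the mlxsw code.

```c
/* Hypothetical user-space model of a resumable migration cursor.
 * Entry/ctx names are illustrative only, not the mlxsw structures.
 */
#include <stdio.h>
#include <stddef.h>

struct entry {
	int id;
	struct entry *next;
};

struct rehash_ctx {
	struct entry *start_entry;	/* resume point, NULL = list head */
	struct entry *stop_entry;	/* rollback stop point, NULL = none (unused here) */
};

/* Migrate entries until the credit budget runs out; remember where we
 * stopped so the next invocation can continue from that entry.
 */
static int migrate(struct entry *head, struct rehash_ctx *ctx, int *credits)
{
	struct entry *e = ctx->start_entry ? ctx->start_entry : head;

	for (; e; e = e->next) {
		if (e == ctx->stop_entry)
			break;			/* rollback reached the failure point */
		if ((*credits)-- <= 0) {
			ctx->start_entry = e;	/* resume here next time */
			return -1;		/* interrupted */
		}
		printf("migrated entry %d\n", e->id);
	}
	ctx->start_entry = NULL;
	return 0;				/* whole list done */
}

int main(void)
{
	struct entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct rehash_ctx ctx = { NULL, NULL };
	int credits = 2;

	while (migrate(&a, &ctx, &credits))
		credits = 2;	/* a new work-queue pass gets a fresh budget */
	return 0;
}
```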
@@ -755,6 +768,31 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
 	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+	/* If a rule was added or deleted from vchunk which is currently
+	 * under rehash migration, we have to reset the ventry pointers
+	 * to make sure all rules are properly migrated.
+	 */
+	if (vregion->rehash.ctx.current_vchunk == vchunk) {
+		vregion->rehash.ctx.start_ventry = NULL;
+		vregion->rehash.ctx.stop_ventry = NULL;
+	}
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	/* If a chunk was added or deleted from vregion we have to reset
+	 * the current chunk pointer to make sure all chunks
+	 * are properly migrated.
+	 */
+	vregion->rehash.ctx.current_vchunk = NULL;
+}
+
 static struct mlxsw_sp_acl_tcam_vregion *
 mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
 				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
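These two helpers invalidate the saved cursor whenever the lists it points into change; a parked rehash work item would otherwise resume at a stale position. A small, self-contained sketch of that hazard follows (hypothetical names, plain C, not the driver's list_head machinery).

```c
/* Hypothetical sketch (not driver code) of why a saved list cursor must
 * be reset whenever the list it points into changes.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	struct entry *next;
};

struct cursor {
	struct entry *resume_from;	/* NULL means "restart from head" */
};

/* Analogue of the *_changed() helpers above: insertion or deletion makes
 * the remembered position unsafe (it may reference freed memory, or a new
 * entry ahead of it would be skipped), so drop it and restart.
 */
static void cursor_list_changed(struct cursor *cur)
{
	cur->resume_from = NULL;
}

static void entry_del(struct entry **head, struct entry *victim,
		      struct cursor *cur)
{
	struct entry **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
	}
	cursor_list_changed(cur);	/* invalidate before the entry is gone */
	free(victim);
}

int main(void)
{
	struct entry *b = malloc(sizeof(*b));
	struct entry *a = malloc(sizeof(*a));
	struct cursor cur;

	b->id = 2; b->next = NULL;
	a->id = 1; a->next = b;
	cur.resume_from = b;		/* pretend migration paused at "b" */
	entry_del(&a, b, &cur);		/* deleting "b" clears the cursor */
	printf("resume_from is %s\n", cur.resume_from ? "stale" : "reset");
	free(a);
	return 0;
}
```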
@@ -989,6 +1027,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 		goto err_chunk_create;
 	}
 
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
 	list_add_tail(&vchunk->list, &vregion->vchunk_list);
 	mutex_unlock(&vregion->lock);
 
@@ -1012,6 +1051,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
 
 	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
 	list_del(&vchunk->list);
 	if (vchunk->chunk2)
 		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
@@ -1141,6 +1181,7 @@ static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
 	}
 
 	list_add_tail(&ventry->list, &vchunk->ventry_list);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
 	mutex_unlock(&vregion->lock);
 
 	return 0;
@@ -1157,6 +1198,7 @@ static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
 
 	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
 	list_del(&ventry->list);
 	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
 	mutex_unlock(&vregion->lock);
@@ -1223,15 +1265,20 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
 	}
 	vchunk->chunk2 = vchunk->chunk;
 	vchunk->chunk = new_chunk;
+	ctx->current_vchunk = vchunk;
+	ctx->start_ventry = NULL;
+	ctx->stop_ventry = NULL;
 	return 0;
 }
 
 static void
 mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
 {
 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
 	vchunk->chunk2 = NULL;
+	ctx->current_vchunk = NULL;
 }
 
 static int
@@ -1254,7 +1301,22 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 		return 0;
 	}
 
-	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
+	/* If the migration got interrupted, we have the ventry to start from
+	 * stored in context.
+	 */
+	if (ctx->start_ventry)
+		ventry = ctx->start_ventry;
+	else
+		ventry = list_first_entry(&vchunk->ventry_list,
+					  typeof(*ventry), list);
+
+	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+		/* During rollback, once we reach the ventry that failed
+		 * to migrate, we are done.
+		 */
+		if (ventry == ctx->stop_ventry)
+			break;
+
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
 						       vchunk->chunk, credits);
 		if (err) {
@@ -1265,16 +1327,25 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 			 * in vchunk->chunk.
 			 */
 			swap(vchunk->chunk, vchunk->chunk2);
+			/* The rollback has to be done from beginning of the
+			 * chunk, that is why we have to null the start_ventry.
+			 * However, we know where to stop the rollback,
+			 * at the current ventry.
+			 */
+			ctx->start_ventry = NULL;
+			ctx->stop_ventry = ventry;
 			return err;
 		} else if (*credits < 0) {
 			/* We are out of credits, the rest of the ventries
-			 * will be migrated later.
+			 * will be migrated later. Save the ventry
+			 * which we ended with.
 			 */
+			ctx->start_ventry = ventry;
 			return 0;
 		}
 	}
 
-	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
+	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
 	return 0;
 }
 
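Together with the previous hunk, this defines the rollback window: on a failed migration the chunk pointers are swapped back, start_ventry is cleared so the rollback pass restarts from the beginning of the chunk, and stop_ventry marks the entry that failed, so only the entries already migrated are moved back. A rough array-based model of that window (illustrative only, not the driver's list code):

```c
/* Hypothetical model of the rollback window: forward-migrate until a
 * simulated failure, then roll back only the already-migrated entries.
 */
#include <stdio.h>

#define N 5

int main(void)
{
	int migrated[N] = { 0 };
	int fail_at = 3;		/* pretend entry 3 fails to migrate */
	int start = 0, stop = -1;	/* start_ventry / stop_ventry analogues */
	int i;

	/* Forward pass: entries 0..fail_at-1 land in the new chunk. */
	for (i = start; i < N; i++) {
		if (i == fail_at) {
			start = 0;	/* rollback restarts from the beginning */
			stop = i;	/* ...and must stop at the failed entry */
			break;
		}
		migrated[i] = 1;
	}

	/* Rollback pass: only the already-migrated entries are moved back. */
	for (i = start; i < N; i++) {
		if (i == stop)
			break;
		migrated[i] = 0;
	}

	for (i = 0; i < N; i++)
		printf("entry %d: %s\n", i, migrated[i] ? "new chunk" : "old chunk");
	return 0;
}
```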
@@ -1287,7 +1358,16 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 	int err;
 
-	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
+	/* If the migration got interrupted, we have the vchunk
+	 * we are working on stored in context.
+	 */
+	if (ctx->current_vchunk)
+		vchunk = ctx->current_vchunk;
+	else
+		vchunk = list_first_entry(&vregion->vchunk_list,
+					  typeof(*vchunk), list);
+
+	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
 		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
 							   vregion->region,
 							   ctx, credits);
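With the vchunk-level cursor added here and the ventry-level cursor above, an interrupted migration resumes at two levels once a new credit budget arrives. A compact user-space model of such two-level resumption (array indices stand in for the saved pointers; not driver code):

```c
/* Hypothetical two-level resume, mirroring current_vchunk (outer cursor)
 * plus start_ventry (inner cursor); arrays stand in for the kernel lists.
 */
#include <stdio.h>

#define CHUNKS 3
#define ENTRIES 4

int main(void)
{
	int cur_chunk = 0, start_entry = 0;	/* the saved context */
	int credits;
	int c, e;

	do {
		credits = 5;			/* fresh budget per work pass */
		for (c = cur_chunk; c < CHUNKS; c++) {
			for (e = start_entry; e < ENTRIES; e++) {
				if (credits-- <= 0) {
					cur_chunk = c;	 /* resume this chunk... */
					start_entry = e; /* ...at this entry */
					goto out_of_credits;
				}
				printf("chunk %d entry %d migrated\n", c, e);
			}
			start_entry = 0;	/* next chunk starts from 0 */
		}
		cur_chunk = CHUNKS;		/* all chunks done */
out_of_credits:
		;
	} while (cur_chunk < CHUNKS);
	return 0;
}
```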
@@ -1315,6 +1395,7 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 		 * to vregion->region.
 		 */
 		swap(vregion->region, vregion->region2);
+		ctx->current_vchunk = NULL;
 		ctx->this_is_rollback = true;
 		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
 							    ctx, credits);