@@ -95,25 +95,34 @@ public class RunServiceImpl implements RunService {
 
     //@formatter:off
     private static final String FIND_AUTOCOMPLETE = """
-            SELECT * FROM (
-               SELECT DISTINCT jsonb_object_keys(q) AS key
-               FROM run, jsonb_path_query(run.data, ? ::jsonpath) q
-               WHERE jsonb_typeof(q) = 'object') AS keys
-            WHERE keys.key LIKE CONCAT(?, '%');
-            """;
-    protected static final String FIND_RUNS_WITH_URI = """
-            SELECT id, testid
-            FROM run
-            WHERE NOT trashed
-               AND (data->>'$schema' = ?1
-                  OR (CASE
-                     WHEN jsonb_typeof(data) = 'object' THEN ?1 IN (SELECT values.value->>'$schema' FROM jsonb_each(data) as values)
-                     WHEN jsonb_typeof(data) = 'array' THEN ?1 IN (SELECT jsonb_array_elements(data)->>'$schema')
-                     ELSE false
-                  END)
-                  OR (metadata IS NOT NULL AND ?1 IN (SELECT jsonb_array_elements(metadata)->>'$schema'))
-               )
-            """;
+            SELECT * FROM (
+               SELECT DISTINCT jsonb_object_keys(q) AS key
+               FROM run, jsonb_path_query(run.data, ? ::jsonpath) q
+               WHERE jsonb_typeof(q) = 'object') AS keys
+            WHERE keys.key LIKE CONCAT(?, '%');
+            """;
+    private static final String FIND_RUNS_WITH_URI = """
+            SELECT id, testid
+            FROM run
+            WHERE NOT trashed
+               AND (data->>'$schema' = ?1
+                  OR (CASE
+                     WHEN jsonb_typeof(data) = 'object' THEN ?1 IN (SELECT values.value->>'$schema' FROM jsonb_each(data) as values)
+                     WHEN jsonb_typeof(data) = 'array' THEN ?1 IN (SELECT jsonb_array_elements(data)->>'$schema')
+                     ELSE false
+                  END)
+                  OR (metadata IS NOT NULL AND ?1 IN (SELECT jsonb_array_elements(metadata)->>'$schema'))
+               )
+            """;
+
+    private static final String UPDATE_DATASET_SCHEMAS = """
+            WITH uris AS (
+               SELECT jsonb_array_elements(ds.data)->>'$schema' AS uri FROM dataset ds WHERE ds.id = ?1
+            ), indexed as (
+               SELECT uri, row_number() over () - 1 as index FROM uris
+            ) INSERT INTO dataset_schemas(dataset_id, uri, index, schema_id)
+            SELECT ?1 as dataset_id, indexed.uri, indexed.index, schema.id FROM indexed JOIN schema ON schema.uri = indexed.uri;
+            """;
     //@formatter:on
     private static final String[] CONDITION_SELECT_TERMINAL = { "==", "!=", "<>", "<", "<=", ">", ">=", " " };
     private static final String CHANGE_ACCESS = "UPDATE run SET owner = ?, access = ? WHERE id = ?";
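The new UPDATE_DATASET_SCHEMAS statement rebuilds the dataset_schemas associations for a single dataset: the uris CTE extracts every `$schema` URI from the dataset's JSON array, indexed numbers them in element order (row_number() over () - 1), and the final INSERT keeps only the URIs that match a row in the schema table. A minimal sketch of how the constant is executed through JPA, mirroring the updateDatasetSchemas helper added further down in this diff; the injected EntityManager field `em` and the enclosing bean are assumed from the rest of the class:

    // Sketch only: the single positional parameter ?1 is the dataset id. It selects the
    // dataset row whose JSON is scanned and is also written into the dataset_id column
    // of every row inserted into dataset_schemas.
    @Transactional
    public void updateDatasetSchemas(int datasetId) {
        em.createNativeQuery(UPDATE_DATASET_SCHEMAS)
                .setParameter(1, datasetId)
                .executeUpdate();
    }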
@@ -188,46 +197,71 @@ void onNewOrUpdatedSchema(int schemaId) {
             log.errorf("Cannot process schema add/update: cannot load schema %d", schemaId);
             return;
         }
-        processNewOrUpdatedSchema(schema);
-    }
-
-    @Transactional
-    void processNewOrUpdatedSchema(SchemaDAO schema) {
-        // we don't have to care about races with new runs
+        clearRunAndDatasetSchemas(schemaId);
         findRunsWithUri(schema.uri, (runId, testId) -> {
             log.debugf("Recalculate Datasets for run %d - schema %d (%s) changed", runId, schema.id, schema.uri);
             onNewOrUpdatedSchemaForRun(runId, schema.id);
         });
     }

     void findRunsWithUri(String uri, BiConsumer<Integer, Integer> consumer) {
-        ScrollableResults<RunFromUri> results = session.createNativeQuery(FIND_RUNS_WITH_URI, Tuple.class).setParameter(1, uri)
+        try (ScrollableResults<RunFromUri> results = session.createNativeQuery(FIND_RUNS_WITH_URI, Tuple.class)
+                .setParameter(1, uri)
                 .setTupleTransformer((tuple, aliases) -> {
                     RunFromUri r = new RunFromUri();
                     r.id = (int) tuple[0];
                     r.testId = (int) tuple[1];
                     return r;
                 })
                 .setFetchSize(100)
-                .scroll(ScrollMode.FORWARD_ONLY);
-        while (results.next()) {
-            RunFromUri r = results.get();
-            consumer.accept(r.id, r.testId);
+                .scroll(ScrollMode.FORWARD_ONLY)) {
+            while (results.next()) {
+                RunFromUri r = results.get();
+                consumer.accept(r.id, r.testId);
+            }
         }
     }

+    /**
+     * Keep the run_schemas table up to date with the associated schemas.
+     * If `recalculate` is true, trigger the run recalculation as well.
+     * This is not required when creating a new run as the datasets will be
+     * created automatically by the process; the recalculation is required when updating
+     * the Schema.
+     * @param runId id of the run
+     * @param schemaId id of the schema
+     */
     @WithRoles(extras = Roles.HORREUM_SYSTEM)
     @Transactional
     void onNewOrUpdatedSchemaForRun(int runId, int schemaId) {
-        em.createNativeQuery("SELECT update_run_schemas(?1)::text").setParameter(1, runId).getSingleResult();
-        //clear validation error tables by schemaId
+        updateRunSchemas(runId);
+
+        // clear validation error tables by schemaId
         em.createNativeQuery("DELETE FROM dataset_validationerrors WHERE schema_id = ?1")
                 .setParameter(1, schemaId).executeUpdate();
         em.createNativeQuery("DELETE FROM run_validationerrors WHERE schema_id = ?1")
                 .setParameter(1, schemaId).executeUpdate();

         Util.registerTxSynchronization(tm, txStatus -> mediator.queueRunRecalculation(runId));
-        // transform(runId, true);
+    }
+
+    @Transactional
+    void updateRunSchemas(int runId) {
+        em.createNativeQuery("SELECT update_run_schemas(?1)::text").setParameter(1, runId).getSingleResult();
+    }
+
+    @Transactional
+    public void updateDatasetSchemas(int datasetId) {
+        em.createNativeQuery(UPDATE_DATASET_SCHEMAS).setParameter(1, datasetId).executeUpdate();
+    }
+
+    @Transactional
+    void clearRunAndDatasetSchemas(int schemaId) {
+        // clear old run and dataset schemas associations
+        em.createNativeQuery("DELETE FROM run_schemas WHERE schemaid = ?1")
+                .setParameter(1, schemaId).executeUpdate();
+        em.createNativeQuery("DELETE FROM dataset_schemas WHERE schema_id = ?1")
+                .setParameter(1, schemaId).executeUpdate();
     }

     @PermitAll
@@ -336,13 +370,13 @@ public JsonNode getMetadata(int id, String schemaUri) {
     @Override
     // TODO: it would be nicer to use @FormParams but fetchival on client side doesn't support that
     public void updateAccess(int id, String owner, Access access) {
-        Query query = em.createNativeQuery(CHANGE_ACCESS);
-        query.setParameter(1, owner);
-        query.setParameter(2, access.ordinal());
-        query.setParameter(3, id);
-        if (query.executeUpdate() != 1) {
+        int updatedRecords = RunDAO.update("owner = ?1, access = ?2 WHERE id = ?3", owner, access, id);
+        if (updatedRecords != 1) {
             throw ServiceException.serverError("Access change failed (missing permissions?)");
         }
+
+        // propagate the same change to all datasets belonging to the run
+        DatasetDAO.update("owner = ?1, access = ?2 WHERE run.id = ?3", owner, access, id);
     }

     @RolesAllowed(Roles.UPLOADER)
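RunDAO.update(...) and DatasetDAO.update(...) above are the static bulk-update helpers inherited from Panache's PanacheEntityBase: the string is a PanacheQL fragment that gets expanded into a full JPQL UPDATE on the entity, and the return value is the number of rows affected, which is what the updatedRecords != 1 check relies on. Note that access is now bound as the mapped enum rather than as access.ordinal() the way the old native CHANGE_ACCESS statement did. A rough, hand-written equivalent of the first call, assuming the entity is mapped as RunDAO with owner, access and id fields:

    // Hypothetical expansion of RunDAO.update("owner = ?1, access = ?2 WHERE id = ?3", owner, access, id)
    int updatedRecords = em.createQuery(
                "UPDATE RunDAO r SET r.owner = ?1, r.access = ?2 WHERE r.id = ?3")
            .setParameter(1, owner)
            .setParameter(2, access)
            .setParameter(3, id)
            .executeUpdate();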
@@ -670,6 +704,7 @@ public RunPersistence addAuthenticated(RunDAO run, TestDAO test) {
         }
         log.debugf("Upload flushed, run ID %d", run.id);

+        updateRunSchemas(run.id);
         mediator.newRun(RunMapper.from(run));
         List<Integer> datasetIds = transform(run.id, false);
         if (mediator.testMode())
@@ -991,6 +1026,7 @@ private void trashInternal(int id, boolean trashed) {
             run.trashed = false;
             run.persistAndFlush();
             transform(id, true);
+            updateRunSchemas(run.id);
         } else
             throw ServiceException.badRequest("Not possible to un-trash a run that's not referenced to a Test");
     }
@@ -1017,7 +1053,8 @@ public void updateDescription(int id, String description) {
             throw ServiceException.notFound("Run not found: " + id);
         }
         run.description = description;
-        run.persistAndFlush();
+        // propagate the same change to all datasets belonging to the run
+        DatasetDAO.update("description = ?1 WHERE run.id = ?2", description, run.id);
     }

     @RolesAllowed(Roles.TESTER)
@@ -1071,7 +1108,7 @@ public Map<Integer, String> updateSchema(int id, String path, String schemaUri)
                 .distinct()
                 .collect(
                         Collectors.toMap(
-                                tuple -> ((Integer) tuple.get("key")).intValue(),
+                                tuple -> (Integer) tuple.get("key"),
                                 tuple -> ((String) tuple.get("value"))));

         em.flush();
@@ -1377,6 +1414,9 @@ List<Integer> transform(int runId, boolean isRecalculation) {
      */
     private Integer createDataset(DatasetDAO ds, boolean isRecalculation) {
         ds.persistAndFlush();
+        // re-create the dataset_schemas associations
+        updateDatasetSchemas(ds.id);
+
         if (isRecalculation) {
             try {
                 Dataset.EventNew event = new Dataset.EventNew(DatasetMapper.from(ds), true);
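One ordering detail in the hunk above: updateDatasetSchemas runs a native INSERT ... SELECT that reads the dataset row on the database side, so the dataset must already be flushed when it executes, which persistAndFlush() guarantees. An annotated excerpt of that sequence (the rest of the method is unchanged and omitted here):

    private Integer createDataset(DatasetDAO ds, boolean isRecalculation) {
        ds.persistAndFlush();          // flush first: the native query below reads the dataset row from the DB
        updateDatasetSchemas(ds.id);   // re-create dataset_schemas rows from the $schema URIs in ds.data
        // ... event emission and recalculation handling continue as in the diff
    }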