diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..617451fab47e46823c7c47a6a166f25b16698d00 --- /dev/null +++ b/README.md @@ -0,0 +1,12 @@ +# Patched + +It allows the zip file to be created even if some of the files are missing. +It generates a report listing the missing files: + +``` +# Archiving Report +# Restoration completed successfully with warnings + [WARNING] Missing file /data/id30a1/inhouse/opid30a1/20181126/RAW_DATA/AFAMIN/AFAMIN-CD024584_B04-3_2/MXPressA_01/opid30a1-line-AFAMIN-CD024584_B04-3_2-line-AFAMIN-CD024584_B04-3_2_3_2450915.h5 + [WARNING] Missing file /data/id30a1/inhouse/opid30a1/20181126/RAW_DATA/AFAMIN/AFAMIN-CD024584_B04-3_2/MXPressA_01/opid30a1-id30a1-line-AFAMIN-CD024584_B04-3_2_3_2450915.h5 + +``` \ No newline at end of file diff --git a/pom.xml b/pom.xml index 50b1ecfe381acdef62e26741716786c48c4f3812..7ed78188125a4efd092467ec2ef0ae4a1e4e2791 100644 --- a/pom.xml +++ b/pom.xml @@ -11,7 +11,6 @@ UTF-8 UTF-8 https://repo.icatproject.org/repo - ids.storage_test-1.0.0-SNAPSHOT.jar github @@ -32,9 +31,9 @@ scm:git:https://github.com/icatproject/ids.server.git - scm:git:https://github.com/icatproject/ids.server.git + scm:git:git@github.com:icatproject/ids.server.git https://github.com/icatproject/ids.server - v1.8.0 + v1.9.1 @@ -73,7 +72,7 @@ org.icatproject icat.client - 4.8.0 + 4.10.0 @@ -85,7 +84,7 @@ org.icatproject ids.plugin - 1.3.1 + 1.5.0 @@ -161,7 +160,7 @@ - cHJvamVjdHxvcmcuaWNhdHByb2plY3QuaWRzLnNlcnZlcnwyMDE4LTA5LTE1fHRydWV8LTEjTUN3Q0ZGemNNQ0g2VWduNVNvbHFsVS8wOG1rRDN3ZFdBaFFnbHRNSVJvUFNuckRqekNjN3hpQTRGUGJqY3c9PQ== + cHJvamVjdHxvcmcuaWNhdHByb2plY3QuaWRzLnNlcnZlcnwyMDIyLTA0LTAxfGZhbHNlfC0xI01Dd0NGQjFzVFhsRS9CL1llTklZWndCZnB2UHlramcrQWhSYW5NR2JOS2craFZMUzJwRzZtcnZ3WXlEMzB3PT0= site/miredot @@ -198,18 +197,11 @@ Insufficient privileges. - - 404 - explicit: - org.icatproject.ids.exceptions.DataNotOnlineException - Data not online or not found. - - 404 explicit: org.icatproject.ids.exceptions.NotFoundException - Data not online or not found. + Data not found. @@ -239,6 +231,13 @@ org.icatproject.ids.exceptions.NotImplementedException Not implemented. + + + 503 + explicit: + org.icatproject.ids.exceptions.DataNotOnlineException + Data not online.
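Note the documentation change above: `DataNotOnlineException` now maps to 503 instead of sharing the 404 slot with `NotFoundException`, so a client can distinguish data that is temporarily offline (restore in progress, retry later) from data that does not exist. A minimal client-side sketch of that distinction, assuming a plain `java.net.http` client; the URL and query value are illustrative, not part of this patch:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GetDataClient {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Hypothetical IDS endpoint and preparedId, for illustration only
        HttpRequest req = HttpRequest.newBuilder(
                URI.create("https://ids.example.org/ids/getData?preparedId=some-prepared-id")).build();
        HttpResponse<String> resp = client.send(req, HttpResponse.BodyHandlers.ofString());
        if (resp.statusCode() == 503) {
            // DataNotOnlineException: a restore has been requested; retry later
        } else if (resp.statusCode() == 404) {
            // NotFoundException: the requested data does not exist
        }
    }
}
```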
+ @@ -330,38 +329,16 @@ 1.4.0 - Undeploy from glassfish - pre-integration-test - - exec - - - asadmin - - undeploy - ids.server-${project.version} - - - 0 - 1 - - - - - Deploy to glassfish + Force install for first test pre-integration-test exec - asadmin + rm - deploy - --deploymentorder - 120 - --libraries - ${storage.plugin} - target/ids.server-${project.version}.war + -f + src/test/install/run.properties @@ -394,6 +371,7 @@ ${serverUrl} ${javax.net.ssl.trustStore} ${containerHome} + ${testHome} diff --git a/src/main/config/logback.xml.example b/src/main/config/logback.xml.example index 94d6f323cac8d34c1ab833beca0932d9039c49d3..21bb62a00b6b7426ffb6d6d0b5e394ee3ba48564 100644 --- a/src/main/config/logback.xml.example +++ b/src/main/config/logback.xml.example @@ -23,4 +23,6 @@ + + diff --git a/src/main/config/run.properties.example b/src/main/config/run.properties.example index 6b13d8fe5fc6afa0be9291ff1e819ae2671a2c86..3bd9a5107208f76b0536435a3d66ec7f453f652d 100644 --- a/src/main/config/run.properties.example +++ b/src/main/config/run.properties.example @@ -18,7 +18,8 @@ maxIdsInQuery = 1000 # Properties for archive storage plugin.archive.class = org.icatproject.ids.storage.ArchiveFileStorage plugin.archive.dir = ${HOME}/ids/archive/ -writeDelaySeconds = 60 +delayDatasetWritesSeconds = 60 +delayDatafileOperationsSeconds = 60 startArchivingLevel1024bytes = 5000000 stopArchivingLevel1024bytes = 4000000 storageUnit = dataset diff --git a/src/main/java/org/icatproject/ids/DataSelection.java b/src/main/java/org/icatproject/ids/DataSelection.java index d5783afc80d8a648efa7d981b33427fe9a9e86d7..cfce15cd04a09c6af168e9714cdf19e271d3a783 100644 --- a/src/main/java/org/icatproject/ids/DataSelection.java +++ b/src/main/java/org/icatproject/ids/DataSelection.java @@ -109,7 +109,7 @@ public class DataSelection { for (Long dfid : dfids) { List dss = icat.search(sessionId, "SELECT ds FROM Dataset ds JOIN ds.datafiles df WHERE df.id = " + dfid - + " INCLUDE ds.investigation.facility"); + + " AND df.location IS NOT NULL INCLUDE ds.investigation.facility"); if (dss.size() == 1) { Dataset ds = (Dataset) dss.get(0); long dsid = ds.getId(); @@ -130,7 +130,7 @@ public class DataSelection { Dataset ds = (Dataset) icat.get(sessionId, "Dataset ds INCLUDE ds.investigation.facility", dsid); dsInfos.put(dsid, new DsInfoImpl(ds)); String query = "SELECT min(df.id), max(df.id), count(df.id) FROM Datafile df WHERE df.dataset.id = " - + dsid; + + dsid + " AND df.location IS NOT NULL"; JsonArray result = Json.createReader(new ByteArrayInputStream(restSession.search(query).getBytes())) .readArray().getJsonArray(0); if (result.getJsonNumber(2).longValueExact() == 0) { // Count 0 @@ -210,7 +210,7 @@ public class DataSelection { visitId, facilityId, facilityName)); query = "SELECT min(df.id), max(df.id), count(df.id) FROM Datafile df WHERE df.dataset.id = " - + dsid; + + dsid + " AND df.location IS NOT NULL"; result = Json.createReader(new ByteArrayInputStream(restSession.search(query).getBytes())) .readArray().getJsonArray(0); if (result.getJsonNumber(2).longValueExact() == 0) { @@ -245,7 +245,7 @@ public class DataSelection { if (count != 0) { if (count <= maxEntities) { String query = "SELECT df.id, df.name, df.location, df.createId, df.modId FROM Datafile df WHERE df.dataset.id = " - + dsid + " AND df.id BETWEEN " + min + " AND " + max; + + dsid + " AND df.location IS NOT NULL AND df.id BETWEEN " + min + " AND " + max; result = Json.createReader(new ByteArrayInputStream(restSession.search(query).getBytes())).readArray(); 
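The recurring change here in `DataSelection` (and below in `FileChecker` and `IdsBean`) appends `df.location IS NOT NULL` to every datafile query: a datafile whose location is null exists only as ICAT metadata, so it must be excluded when selecting, counting or sizing files. A sketch of the pattern; `LocationGuard`/`withLocationGuard` are hypothetical names, the patch simply inlines the clause at each call site:

```java
public final class LocationGuard {

    private LocationGuard() {
    }

    /** Append the clause that excludes metadata-only datafiles (null location). */
    static String withLocationGuard(String datafileQuery) {
        return datafileQuery + " AND df.location IS NOT NULL";
    }

    public static void main(String[] args) {
        // e.g. the count query from the hunk above, with an illustrative dataset id
        System.out.println(withLocationGuard(
                "SELECT min(df.id), max(df.id), count(df.id) FROM Datafile df WHERE df.dataset.id = 42"));
    }
}
```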
for (JsonValue tupV : result) { JsonArray tup = (JsonArray) tupV; @@ -257,12 +257,12 @@ public class DataSelection { } else { long half = (min + max) / 2; String query = "SELECT min(df.id), max(df.id), count(df.id) FROM Datafile df WHERE df.dataset.id = " - + dsid + " AND df.id BETWEEN " + min + " AND " + half; + + dsid + " AND df.location IS NOT NULL AND df.id BETWEEN " + min + " AND " + half; result = Json.createReader(new ByteArrayInputStream(restSession.search(query).getBytes())).readArray() .getJsonArray(0); manyDfs(dsid, result); query = "SELECT min(df.id), max(df.id), count(df.id) FROM Datafile df WHERE df.dataset.id = " + dsid - + " AND df.id BETWEEN " + (half + 1) + " AND " + max; + + " AND df.location IS NOT NULL AND df.id BETWEEN " + (half + 1) + " AND " + max; result = Json.createReader(new ByteArrayInputStream(restSession.search(query).getBytes())).readArray() .getJsonArray(0); manyDfs(dsid, result); diff --git a/src/main/java/org/icatproject/ids/FileChecker.java b/src/main/java/org/icatproject/ids/FileChecker.java index 0bf0c8f11fd6032359ceb820ee8481a1349558cf..825ec6becceacc62d6381ebdcf636be31bc694fb 100644 --- a/src/main/java/org/icatproject/ids/FileChecker.java +++ b/src/main/java/org/icatproject/ids/FileChecker.java @@ -75,8 +75,13 @@ public class FileChecker { if (twoLevel) { Dataset ds = (Dataset) eb; logger.info("Checking Dataset " + ds.getId() + " (" + ds.getName() + ")"); - List dfs = ds.getDatafiles(); - if (!dfs.isEmpty()) { + Map crcAndLength = new HashMap<>(); + for (Datafile df : ds.getDatafiles()) { + if (df.getLocation() != null) { + crcAndLength.put(df.getName(), new CrcAndLength(df)); + } + } + if (!crcAndLength.isEmpty()) { String dfName = null; DsInfo dsInfo; @@ -86,15 +91,11 @@ public class FileChecker { report(ds, dfName, "Reports: " + e.getClass().getSimpleName() + " " + e.getMessage()); return; } - Map crcAndLength = new HashMap<>(); Path tPath = null; try { tPath = Files.createTempFile(null, null); archiveStorage.get(dsInfo, tPath); try (ZipInputStream zis = new ZipInputStream(Files.newInputStream(tPath))) { - for (Datafile df : dfs) { - crcAndLength.put(df.getName(), new CrcAndLength(df)); - } ZipEntry ze = zis.getNextEntry(); while (ze != null) { dfName = zipMapper.getFileName(ze.getName()); @@ -231,10 +232,10 @@ public class FileChecker { } } else { if (maxId != null) { - query = "SELECT df FROM Datafile df WHERE df.id > " + maxId + " ORDER BY df.id LIMIT 0, " + query = "SELECT df FROM Datafile df WHERE df.id > " + maxId + " AND df.location IS NOT NULL ORDER BY df.id LIMIT 0, " + filesCheckParallelCount; } else { - query = "SELECT df FROM Datafile df ORDER BY df.id LIMIT 0, " + filesCheckParallelCount; + query = "SELECT df FROM Datafile df WHERE df.location IS NOT NULL ORDER BY df.id LIMIT 0, " + filesCheckParallelCount; } } List os = reader.search(query); diff --git a/src/main/java/org/icatproject/ids/FiniteStateMachine.java b/src/main/java/org/icatproject/ids/FiniteStateMachine.java index 8b2a43033e62da8426b3397dc2ea71464a919f80..344e5a1f0275584433e432c3c9befec3b928d55a 100644 --- a/src/main/java/org/icatproject/ids/FiniteStateMachine.java +++ b/src/main/java/org/icatproject/ids/FiniteStateMachine.java @@ -22,12 +22,18 @@ import java.util.concurrent.ConcurrentHashMap; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; +import javax.ejb.DependsOn; import javax.ejb.EJB; import javax.ejb.Singleton; import javax.json.Json; import javax.json.stream.JsonGenerator; +import org.icatproject.Dataset; +import 
org.icatproject.ids.LockManager.Lock; +import org.icatproject.ids.LockManager.LockInfo; +import org.icatproject.ids.LockManager.LockType; import org.icatproject.ids.exceptions.InternalException; +import org.icatproject.ids.plugin.AlreadyLockedException; import org.icatproject.ids.plugin.DfInfo; import org.icatproject.ids.plugin.DsInfo; import org.icatproject.ids.thread.DfArchiver; @@ -41,6 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Singleton +@DependsOn({ "LockManager" }) public class FiniteStateMachine { private class DfProcessQueue extends TimerTask { @@ -49,42 +56,109 @@ public class FiniteStateMachine { public void run() { try { synchronized (deferredDfOpsQueue) { - if (writeTime != null && System.currentTimeMillis() > writeTime && !deferredDfOpsQueue.isEmpty()) { - writeTime = null; + if (processOpsTime != null && System.currentTimeMillis() > processOpsTime && !deferredDfOpsQueue.isEmpty()) { + processOpsTime = null; logger.debug("deferredDfOpsQueue has " + deferredDfOpsQueue.size() + " entries"); List writes = new ArrayList<>(); List archives = new ArrayList<>(); List restores = new ArrayList<>(); List deletes = new ArrayList<>(); + Map writeLocks = new HashMap<>(); + Map archiveLocks = new HashMap<>(); + Map restoreLocks = new HashMap<>(); + Map deleteLocks = new HashMap<>(); Map newOps = new HashMap<>(); final Iterator> it = deferredDfOpsQueue.entrySet().iterator(); while (it.hasNext()) { Entry opEntry = it.next(); DfInfoImpl dfInfo = opEntry.getKey(); + Long dsId = dfInfo.getDsId(); + DsInfo dsInfo; + try { + Dataset ds = (Dataset) reader.get("Dataset ds INCLUDE ds.investigation.facility", dsId); + dsInfo = new DsInfoImpl(ds); + } catch (Exception e) { + logger.error("Could not get dsInfo {}: {}.", dsId, e.getMessage()); + continue; + } if (!dfChanging.containsKey(dfInfo)) { - it.remove(); final RequestedState state = opEntry.getValue(); logger.debug(dfInfo + " " + state); if (state == RequestedState.WRITE_REQUESTED) { + if (!writeLocks.containsKey(dsId)) { + try { + writeLocks.put(dsId, lockManager.lock(dsInfo, LockType.SHARED)); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsId + ", hold back " + state); + continue; + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsId); + continue; + } + } + it.remove(); dfChanging.put(dfInfo, state); writes.add(dfInfo); } else if (state == RequestedState.WRITE_THEN_ARCHIVE_REQUESTED) { + if (!writeLocks.containsKey(dsId)) { + try { + writeLocks.put(dsId, lockManager.lock(dsInfo, LockType.SHARED)); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsId + ", hold back " + state); + continue; + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsId); + continue; + } + } + it.remove(); dfChanging.put(dfInfo, RequestedState.WRITE_REQUESTED); writes.add(dfInfo); newOps.put(dfInfo, RequestedState.ARCHIVE_REQUESTED); } else if (state == RequestedState.ARCHIVE_REQUESTED) { - long dsId = dfInfo.getDsId(); - if (isLocked(dsId, QueryLockType.ARCHIVE)) { - logger.debug("Archive of " + dfInfo + " skipped because getData in progress"); - continue; + if (!archiveLocks.containsKey(dsId)) { + try { + archiveLocks.put(dsId, lockManager.lock(dsInfo, LockType.EXCLUSIVE)); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsId + ", hold back " + state); + continue; + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " 
locking " + dsId); + continue; + } } + it.remove(); dfChanging.put(dfInfo, state); archives.add(dfInfo); } else if (state == RequestedState.RESTORE_REQUESTED) { + if (!restoreLocks.containsKey(dsId)) { + try { + restoreLocks.put(dsId, lockManager.lock(dsInfo, LockType.EXCLUSIVE)); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsId + ", hold back " + state); + continue; + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsId); + continue; + } + } + it.remove(); dfChanging.put(dfInfo, state); restores.add(dfInfo); } else if (state == RequestedState.DELETE_REQUESTED) { + if (!deleteLocks.containsKey(dsId)) { + try { + deleteLocks.put(dsId, lockManager.lock(dsInfo, LockType.EXCLUSIVE)); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsId + ", hold back " + state); + continue; + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsId); + continue; + } + } + it.remove(); dfChanging.put(dfInfo, state); deletes.add(dfInfo); } else { @@ -95,26 +169,28 @@ public class FiniteStateMachine { if (!newOps.isEmpty()) { deferredDfOpsQueue.putAll(newOps); logger.debug("Adding {} operations to be scheduled next time round", newOps.size()); - writeTime = 0L; + } + if (!deferredDfOpsQueue.isEmpty()) { + processOpsTime = 0L; } if (!writes.isEmpty()) { logger.debug("Launch thread to process " + writes.size() + " writes"); - Thread w = new Thread(new DfWriter(writes, propertyHandler, FiniteStateMachine.this)); + Thread w = new Thread(new DfWriter(writes, propertyHandler, FiniteStateMachine.this, writeLocks.values())); w.start(); } if (!archives.isEmpty()) { logger.debug("Launch thread to process " + archives.size() + " archives"); - Thread w = new Thread(new DfArchiver(archives, propertyHandler, FiniteStateMachine.this)); + Thread w = new Thread(new DfArchiver(archives, propertyHandler, FiniteStateMachine.this, archiveLocks.values())); w.start(); } if (!restores.isEmpty()) { logger.debug("Launch thread to process " + restores.size() + " restores"); - Thread w = new Thread(new DfRestorer(restores, propertyHandler, FiniteStateMachine.this)); + Thread w = new Thread(new DfRestorer(restores, propertyHandler, FiniteStateMachine.this, restoreLocks.values())); w.start(); } if (!deletes.isEmpty()) { logger.debug("Launch thread to process " + deletes.size() + " deletes"); - Thread w = new Thread(new DfDeleter(deletes, propertyHandler, FiniteStateMachine.this)); + Thread w = new Thread(new DfDeleter(deletes, propertyHandler, FiniteStateMachine.this, deleteLocks.values())); w.start(); } } @@ -144,36 +220,53 @@ public class FiniteStateMachine { if (state == RequestedState.WRITE_REQUESTED || state == RequestedState.WRITE_THEN_ARCHIVE_REQUESTED) { if (now > writeTimes.get(dsInfo)) { - logger.debug("Will process " + dsInfo + " with " + state); - writeTimes.remove(dsInfo); - dsChanging.put(dsInfo, RequestedState.WRITE_REQUESTED); - it.remove(); - final Thread w = new Thread( - new DsWriter(dsInfo, propertyHandler, FiniteStateMachine.this, reader)); - w.start(); - if (state == RequestedState.WRITE_THEN_ARCHIVE_REQUESTED) { - newOps.put(dsInfo, RequestedState.ARCHIVE_REQUESTED); + try { + Lock lock = lockManager.lock(dsInfo, LockType.SHARED); + logger.debug("Will process " + dsInfo + " with " + state); + writeTimes.remove(dsInfo); + dsChanging.put(dsInfo, RequestedState.WRITE_REQUESTED); + it.remove(); + final Thread w = new Thread( + new DsWriter(dsInfo, propertyHandler, 
FiniteStateMachine.this, reader, lock)); + w.start(); + if (state == RequestedState.WRITE_THEN_ARCHIVE_REQUESTED) { + newOps.put(dsInfo, RequestedState.ARCHIVE_REQUESTED); + } + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsInfo + ", hold back process with " + state); + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsInfo); } } } else if (state == RequestedState.ARCHIVE_REQUESTED) { - it.remove(); - long dsId = dsInfo.getDsId(); - if (isLocked(dsId, QueryLockType.ARCHIVE)) { - logger.debug("Archive of " + dsInfo + " skipped because getData in progress"); - continue; + try { + Lock lock = lockManager.lock(dsInfo, LockType.EXCLUSIVE); + it.remove(); + long dsId = dsInfo.getDsId(); + logger.debug("Will process " + dsInfo + " with " + state); + dsChanging.put(dsInfo, state); + final Thread w = new Thread( + new DsArchiver(dsInfo, propertyHandler, FiniteStateMachine.this, lock)); + w.start(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsInfo + ", hold back process with " + state); + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsInfo); } - logger.debug("Will process " + dsInfo + " with " + state); - dsChanging.put(dsInfo, state); - final Thread w = new Thread( - new DsArchiver(dsInfo, propertyHandler, FiniteStateMachine.this)); - w.start(); } else if (state == RequestedState.RESTORE_REQUESTED) { - logger.debug("Will process " + dsInfo + " with " + state); - dsChanging.put(dsInfo, state); - it.remove(); - final Thread w = new Thread( - new DsRestorer(dsInfo, propertyHandler, FiniteStateMachine.this, reader)); - w.start(); + try { + Lock lock = lockManager.lock(dsInfo, LockType.EXCLUSIVE); + logger.debug("Will process " + dsInfo + " with " + state); + dsChanging.put(dsInfo, state); + it.remove(); + final Thread w = new Thread( + new DsRestorer(dsInfo, propertyHandler, FiniteStateMachine.this, reader, lock)); + w.start(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock on " + dsInfo + ", hold back process with " + state); + } catch (IOException e) { + logger.error("I/O exception " + e.getMessage() + " locking " + dsInfo); + } } } } @@ -192,17 +285,13 @@ ARCHIVE_REQUESTED, DELETE_REQUESTED, RESTORE_REQUESTED, WRITE_REQUESTED, WRITE_THEN_ARCHIVE_REQUESTED } - public enum SetLockType { - ARCHIVE, ARCHIVE_AND_DELETE - } - - public enum QueryLockType { - ARCHIVE, DELETE - } - private static Logger logger = LoggerFactory.getLogger(FiniteStateMachine.class); - private long archiveWriteDelayMillis; + /* + * Note that the variable processOpsDelayMillis is used to either delay all deferred + * datafile operations or to delay dataset writes, depending on the setting of storageUnit.
+ */ + private long processOpsDelayMillis; private Map deferredDfOpsQueue = new HashMap<>(); @@ -212,9 +301,6 @@ public class FiniteStateMachine { private Map dsChanging = new HashMap<>(); - private Map> deleteLocks = new HashMap<>(); - private Map> archiveLocks = new HashMap<>(); - private Path markerDir; private long processQueueIntervalMillis; @@ -222,17 +308,14 @@ public class FiniteStateMachine { @EJB IcatReader reader; - private StorageUnit storageUnit; + @EJB + private LockManager lockManager; - private boolean synchLocksOnDataset; + private StorageUnit storageUnit; private Timer timer = new Timer("FSM Timer"); - /* - * Note that the variable writeTime has been abused and now also delays - * restore operations TODO - */ - private Long writeTime; + private Long processOpsTime; private Map writeTimes = new HashMap<>(); @@ -318,65 +401,28 @@ public class FiniteStateMachine { public String getServiceStatus() throws InternalException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - if (storageUnit == null) { - try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + if (storageUnit == null) { gen.writeStartArray("opsQueue").writeEnd(); - gen.write("lockCount", 0); - gen.writeStartArray("lockedIds").writeEnd(); - gen.writeStartArray("failures").writeEnd(); - gen.writeEnd(); // end Object() - } - } else if (storageUnit == StorageUnit.DATASET) { - Map union; - Collection> locksContentsClone; - synchronized (deferredDsOpsQueue) { - union = new HashMap<>(dsChanging); - union.putAll(deferredDsOpsQueue); - locksContentsClone = new HashSet<>(archiveLocks.values()); - locksContentsClone.addAll(deleteLocks.values()); - } - try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + } else if (storageUnit == StorageUnit.DATASET) { + Map union; + synchronized (deferredDsOpsQueue) { + union = new HashMap<>(dsChanging); + union.putAll(deferredDsOpsQueue); + } gen.writeStartArray("opsQueue"); for (Entry entry : union.entrySet()) { DsInfo item = entry.getKey(); - gen.writeStartObject().write("data", item.toString()).write("request", entry.getValue().name()) .writeEnd(); - } gen.writeEnd(); // end Array("opsQueue") - - gen.write("lockCount", locksContentsClone.size()); - - Set lockedDs = new HashSet<>(); - - for (Set entry : locksContentsClone) { - lockedDs.addAll(entry); - } - gen.writeStartArray("lockedIds"); - for (Long dsId : lockedDs) { - gen.write(dsId); - } - gen.writeEnd(); // end Array("lockedDs") - - gen.writeStartArray("failures"); - for (Long failure : failures) { - gen.write(failure); + } else if (storageUnit == StorageUnit.DATAFILE) { + Map union; + synchronized (deferredDfOpsQueue) { + union = new HashMap<>(dfChanging); + union.putAll(deferredDfOpsQueue); } - gen.writeEnd(); // end Array("failures") - - gen.writeEnd(); // end Object() - } - } else if (storageUnit == StorageUnit.DATAFILE) { - Map union; - Collection> locksContentsClone; - synchronized (deferredDfOpsQueue) { - union = new HashMap<>(dfChanging); - union.putAll(deferredDfOpsQueue); - locksContentsClone = new HashSet<>(archiveLocks.values()); - locksContentsClone.addAll(deleteLocks.values()); - } - try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { gen.writeStartArray("opsQueue"); for (Entry entry : union.entrySet()) { DfInfo item = entry.getKey(); @@ -384,50 +430,41 @@ public class FiniteStateMachine { .writeEnd(); } gen.writeEnd(); // end Array("opsQueue") + } - 
gen.write("lockCount", locksContentsClone.size()); - - Set lockedDs = new HashSet<>(); - - for (Set entry : locksContentsClone) { - lockedDs.addAll(entry); - } - gen.writeStartArray("lockedIds"); - for (Long dsId : lockedDs) { - gen.write(dsId); - } - gen.writeEnd(); // end Array("lockedDs") - - gen.writeStartArray("failures"); - for (Long failure : failures) { - gen.write(failure); - } - gen.writeEnd(); // end Array("failures") + Collection lockInfo = lockManager.getLockInfo(); + gen.write("lockCount", lockInfo.size()); + gen.writeStartArray("locks"); + for (LockInfo li : lockInfo) { + gen.writeStartObject().write("id", li.id).write("type", li.type.name()).write("count", li.count).writeEnd(); + } + gen.writeEnd(); // end Array("locks") - gen.writeEnd(); // end Object() + gen.writeStartArray("failures"); + for (Long failure : failures) { + gen.write(failure); } + gen.writeEnd(); // end Array("failures") + + gen.writeEnd(); // end Object() } return baos.toString(); - } @PostConstruct private void init() { try { propertyHandler = PropertyHandler.getInstance(); - archiveWriteDelayMillis = propertyHandler.getWriteDelaySeconds() * 1000L; processQueueIntervalMillis = propertyHandler.getProcessQueueIntervalSeconds() * 1000L; storageUnit = propertyHandler.getStorageUnit(); if (storageUnit == StorageUnit.DATASET) { + processOpsDelayMillis = propertyHandler.getDelayDatasetWrites() * 1000L; timer.schedule(new DsProcessQueue(), processQueueIntervalMillis); logger.info("DsProcessQueue scheduled to run in " + processQueueIntervalMillis + " milliseconds"); - synchLocksOnDataset = true; } else if (storageUnit == StorageUnit.DATAFILE) { + processOpsDelayMillis = propertyHandler.getDelayDatafileOperations() * 1000L; timer.schedule(new DfProcessQueue(), processQueueIntervalMillis); logger.info("DfProcessQueue scheduled to run in " + processQueueIntervalMillis + " milliseconds"); - synchLocksOnDataset = false; - } else { - synchLocksOnDataset = true; } markerDir = propertyHandler.getCacheDir().resolve("marker"); Files.createDirectories(markerDir); @@ -436,63 +473,14 @@ public class FiniteStateMachine { } } - public boolean isLocked(long dsId, QueryLockType lockType) { - if (synchLocksOnDataset) { - synchronized (deferredDsOpsQueue) { - return locked(dsId, lockType); - } - } else { - synchronized (deferredDfOpsQueue) { - return locked(dsId, lockType); - } - } - } - - public String lock(Set set, SetLockType lockType) { - String lockId = UUID.randomUUID().toString(); - if (synchLocksOnDataset) { - synchronized (deferredDsOpsQueue) { - archiveLocks.put(lockId, set); - if (lockType == SetLockType.ARCHIVE_AND_DELETE) { - deleteLocks.put(lockId, set); - } - } - } else { - synchronized (deferredDfOpsQueue) { - archiveLocks.put(lockId, set); - if (lockType == SetLockType.ARCHIVE_AND_DELETE) { - deleteLocks.put(lockId, set); - } - } - } - return lockId; - } - - private boolean locked(long dsId, QueryLockType lockType) { - if (lockType == QueryLockType.ARCHIVE) { - for (Set lock : archiveLocks.values()) { - if (lock.contains(dsId)) { - return true; - } - } - } else if (lockType == QueryLockType.DELETE) { - for (Set lock : deleteLocks.values()) { - if (lock.contains(dsId)) { - return true; - } - } - } - return false; - } - public void queue(DfInfoImpl dfInfo, DeferredOp deferredOp) throws InternalException { logger.info("Requesting " + deferredOp + " of datafile " + dfInfo); synchronized (deferredDfOpsQueue) { - if (writeTime == null) { - writeTime = System.currentTimeMillis() + archiveWriteDelayMillis; - final Date d = 
new Date(writeTime); + if (processOpsTime == null) { + processOpsTime = System.currentTimeMillis() + processOpsDelayMillis; + final Date d = new Date(processOpsTime); logger.debug("Requesting delay operations till " + d); } @@ -617,31 +605,13 @@ public class FiniteStateMachine { } private void setDelay(DsInfo dsInfo) { - writeTimes.put(dsInfo, System.currentTimeMillis() + archiveWriteDelayMillis); + writeTimes.put(dsInfo, System.currentTimeMillis() + processOpsDelayMillis); if (logger.isDebugEnabled()) { final Date d = new Date(writeTimes.get(dsInfo)); logger.debug("Requesting delay of writing of dataset " + dsInfo + " till " + d); } } - public void unlock(String lockId, SetLockType lockType) { - if (synchLocksOnDataset) { - synchronized (deferredDsOpsQueue) { - archiveLocks.remove(lockId); - if (lockType == SetLockType.ARCHIVE_AND_DELETE) { - deleteLocks.remove(lockId); - } - } - } else { - synchronized (deferredDfOpsQueue) { - archiveLocks.remove(lockId); - if (lockType == SetLockType.ARCHIVE_AND_DELETE) { - deleteLocks.remove(lockId); - } - } - } - } - public void recordSuccess(Long id) { if (failures.remove(id)) { logger.debug("Marking {} OK", id); diff --git a/src/main/java/org/icatproject/ids/IdsBean.java b/src/main/java/org/icatproject/ids/IdsBean.java index f2bc5ecbb0c76c6ea8c64f23f4a7844cdba9453e..27b16898ea37c3f3ad06788b3f538af5c135666e 100644 --- a/src/main/java/org/icatproject/ids/IdsBean.java +++ b/src/main/java/org/icatproject/ids/IdsBean.java @@ -61,7 +61,8 @@ import org.icatproject.ICAT; import org.icatproject.IcatExceptionType; import org.icatproject.IcatException_Exception; import org.icatproject.ids.DataSelection.Returns; -import org.icatproject.ids.FiniteStateMachine.SetLockType; +import org.icatproject.ids.LockManager.Lock; +import org.icatproject.ids.LockManager.LockType; import org.icatproject.ids.exceptions.BadRequestException; import org.icatproject.ids.exceptions.DataNotOnlineException; import org.icatproject.ids.exceptions.IdsException; @@ -69,6 +70,7 @@ import org.icatproject.ids.exceptions.InsufficientPrivilegesException; import org.icatproject.ids.exceptions.InternalException; import org.icatproject.ids.exceptions.NotFoundException; import org.icatproject.ids.exceptions.NotImplementedException; +import org.icatproject.ids.plugin.AlreadyLockedException; import org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DfInfo; import org.icatproject.ids.plugin.DsInfo; @@ -137,11 +139,7 @@ public class IdsBean { @Override public Void call() throws Exception { for (DfInfoImpl dfInfo : dfInfos) { - try { - restoreIfOffline(dfInfo); - } catch (IOException e) { - logger.error("I/O error " + e.getMessage() + " for " + dfInfo); - } + restoreIfOffline(dfInfo); } return null; } @@ -160,11 +158,7 @@ public class IdsBean { @Override public Void call() throws Exception { for (DsInfo dsInfo : dsInfos) { - try { - restoreIfOffline(dsInfo, emptyDs); - } catch (IOException e) { - logger.error("I/O error " + e.getMessage() + " for " + dsInfo); - } + restoreIfOffline(dsInfo, emptyDs); } return null; } @@ -175,7 +169,7 @@ public class IdsBean { private long offset; private boolean zip; private Map dsInfos; - private String lockId; + private Lock lock; private boolean compress; private Set dfInfos; private String ip; @@ -183,12 +177,12 @@ public class IdsBean { private Long transferId; SO(Map dsInfos, Set dfInfos, long offset, boolean zip, boolean compress, - String lockId, Long transferId, String ip, long start) { + Lock lock, Long transferId, String ip, 
long start) { this.offset = offset; this.zip = zip; this.dsInfos = dsInfos; this.dfInfos = dfInfos; - this.lockId = lockId; + this.lock = lock; this.compress = compress; this.transferId = transferId; this.ip = ip; @@ -291,7 +285,7 @@ public class IdsBean { logger.error("Failed to stream " + transfer + " due to " + e.getMessage()); throw e; } finally { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); + lock.release(); } } @@ -359,7 +353,7 @@ public class IdsBean { public static String getLocation(long dfid, String location) throws InsufficientPrivilegesException, InternalException { if (location == null) { - throw new InsufficientPrivilegesException("location null"); + throw new InternalException("location is null"); } if (key == null) { return location; @@ -497,6 +491,9 @@ public class IdsBean { @EJB private FiniteStateMachine fsm; + @EJB + private LockManager lockManager; + private ICAT icat; private Path linkDir; @@ -606,7 +603,7 @@ public class IdsBean { } } - private void checkDatafilesPresent(Set dfInfos, String lockId) + private void checkDatafilesPresent(Set dfInfos) throws NotFoundException, InternalException { /* Check that datafiles have not been deleted before locking */ int n = 0; @@ -619,13 +616,11 @@ public class IdsBean { if (++n == maxIdsInQuery) { try { if (((Long) reader.search(sb.append("))").toString()).get(0)).intValue() != n) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); throw new NotFoundException("One of the data files requested has been deleted"); } n = 0; sb = new StringBuffer("SELECT COUNT(df) from Datafile df WHERE (df.id in ("); } catch (IcatException_Exception e) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); throw new InternalException(e.getFaultInfo().getType() + " " + e.getMessage()); } } @@ -633,53 +628,41 @@ public class IdsBean { if (n != 0) { try { if (((Long) reader.search(sb.append("))").toString()).get(0)).intValue() != n) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); throw new NotFoundException("One of the datafiles requested has been deleted"); } } catch (IcatException_Exception e) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); throw new InternalException(e.getFaultInfo().getType() + " " + e.getMessage()); } } } - private void checkOnlineAndFreeLockOnFailure(Collection dsInfos, Set emptyDatasets, - Set dfInfos, String lockId, SetLockType lockType) + private void checkOnline(Collection dsInfos, Set emptyDatasets, + Set dfInfos) throws InternalException, DataNotOnlineException { - try { - if (storageUnit == StorageUnit.DATASET) { - boolean maybeOffline = false; - for (DsInfo dsInfo : dsInfos) { - if (restoreIfOffline(dsInfo, emptyDatasets)) { - maybeOffline = true; - } - } - if (maybeOffline) { - fsm.unlock(lockId, lockType); - throw new DataNotOnlineException( - "Before putting, getting or deleting a datafile, its dataset has to be restored, restoration requested automatically"); - } - } else if (storageUnit == StorageUnit.DATAFILE) { - boolean maybeOffline = false; - for (DfInfoImpl dfInfo : dfInfos) { - if (restoreIfOffline(dfInfo)) { - maybeOffline = true; - } - + if (storageUnit == StorageUnit.DATASET) { + boolean maybeOffline = false; + for (DsInfo dsInfo : dsInfos) { + if (restoreIfOffline(dsInfo, emptyDatasets)) { + maybeOffline = true; } - if (maybeOffline) { - fsm.unlock(lockId, lockType); - throw new DataNotOnlineException( - "Before getting a datafile, it must be restored, restoration requested 
automatically"); + } + if (maybeOffline) { + throw new DataNotOnlineException( + "Before putting, getting or deleting a datafile, its dataset has to be restored, restoration requested automatically"); + } + } else if (storageUnit == StorageUnit.DATAFILE) { + boolean maybeOffline = false; + for (DfInfoImpl dfInfo : dfInfos) { + if (restoreIfOffline(dfInfo)) { + maybeOffline = true; } } - } catch (IOException e) { - fsm.unlock(lockId, lockType); - logger.error("I/O error " + e.getMessage() + " checking online"); - throw new InternalException(e.getClass() + " " + e.getMessage()); + if (maybeOffline) { + throw new DataNotOnlineException( + "Before getting a datafile, it must be restored, restoration requested automatically"); + } } - } public void delete(String sessionId, String investigationIds, String datasetIds, String datafileIds, String ip) @@ -704,28 +687,9 @@ public class IdsBean { Collection dsInfos = dataSelection.getDsInfo().values(); Set dfInfos = dataSelection.getDfInfo(); - String lockId = null; - if (storageUnit == StorageUnit.DATASET) { - /* - * Lock the datasets to prevent archiving of the datasets. It is - * important that they be unlocked again. - */ - Set dsIds = new HashSet<>(); - for (DsInfo dsInfo : dsInfos) { - dsIds.add(dsInfo.getDsId()); - } - lockId = fsm.lock(dsIds, FiniteStateMachine.SetLockType.ARCHIVE); - checkOnlineAndFreeLockOnFailure(dsInfos, dataSelection.getEmptyDatasets(), dfInfos, lockId, - FiniteStateMachine.SetLockType.ARCHIVE); - } - - try { - for (DsInfo dsInfo : dsInfos) { - logger.debug("DS " + dsInfo.getDsId() + " " + dsInfo); - if (fsm.isLocked(dsInfo.getDsId(), FiniteStateMachine.QueryLockType.DELETE)) { - throw new BadRequestException( - "Dataset " + dsInfo + " (or a part of it) is currently being streamed to a user"); - } + try (Lock lock = lockManager.lock(dsInfos, LockType.EXCLUSIVE)) { + if (storageUnit == StorageUnit.DATASET) { + checkOnline(dsInfos, dataSelection.getEmptyDatasets(), dfInfos); } /* Now delete from ICAT */ @@ -758,7 +722,7 @@ public class IdsBean { String location = dfInfo.getDfLocation(); try { if ((long) reader - .search("SELECT COUNT(df) FROM Datafile df WHERE df.location LIKE '" + location + "%'") + .search("SELECT COUNT(df) FROM Datafile df WHERE df.location LIKE '" + location.replaceAll("'", "''") + "%'") .get(0) == 0) { if (mainStorage.exists(location)) { logger.debug("Delete physical file " + location + " from main storage"); @@ -782,10 +746,12 @@ public class IdsBean { } } - } finally { - if (lockId != null) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE); - } + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, delete failed"); + throw new DataNotOnlineException("Data is busy"); + } catch (IOException e) { + logger.error("I/O error " + e.getMessage()); + throw new InternalException(e.getClass() + " " + e.getMessage()); } if (logSet.contains(CallType.WRITE)) { @@ -831,57 +797,65 @@ public class IdsBean { final Map dsInfos = prepared.dsInfos; Set emptyDatasets = prepared.emptyDatasets; - /* - * Lock the datasets which prevents deletion of datafiles within the - * dataset and archiving of the datasets. It is important that they be - * unlocked again. 
- */ - final String lockId = fsm.lock(dsInfos.keySet(), FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); - - if (twoLevel) { - checkOnlineAndFreeLockOnFailure(dsInfos.values(), emptyDatasets, dfInfos, lockId, - FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); - } - - checkDatafilesPresent(dfInfos, lockId); + Lock lock = null; + try { + lock = lockManager.lock(dsInfos.values(), LockType.SHARED); - /* Construct the name to include in the headers */ - String name; - if (outname == null) { - if (zip) { - name = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()) + ".zip"; - } else { - name = dfInfos.iterator().next().getDfName(); + if (twoLevel) { + checkOnline(dsInfos.values(), emptyDatasets, dfInfos); } - } else { - if (zip) { - String ext = outname.substring(outname.lastIndexOf(".") + 1, outname.length()); - if ("zip".equals(ext)) { - name = outname; + checkDatafilesPresent(dfInfos); + + /* Construct the name to include in the headers */ + String name; + if (outname == null) { + if (zip) { + name = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()) + ".zip"; } else { - name = outname + ".zip"; + name = dfInfos.iterator().next().getDfName(); } } else { - name = outname; + if (zip) { + String ext = outname.substring(outname.lastIndexOf(".") + 1, outname.length()); + if ("zip".equals(ext)) { + name = outname; + } else { + name = outname + ".zip"; + } + } else { + name = outname; + } } - } - Long transferId = null; - if (logSet.contains(CallType.READ)) { - transferId = atomicLong.getAndIncrement(); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { - gen.write("transferId", transferId); - gen.write("preparedId", preparedId); - gen.writeEnd(); + Long transferId = null; + if (logSet.contains(CallType.READ)) { + transferId = atomicLong.getAndIncrement(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + gen.write("transferId", transferId); + gen.write("preparedId", preparedId); + gen.writeEnd(); + } + transmitter.processMessage("getDataStart", ip, baos.toString(), time); } - transmitter.processMessage("getDataStart", ip, baos.toString(), time); - } - return Response.status(offset == 0 ? HttpURLConnection.HTTP_OK : HttpURLConnection.HTTP_PARTIAL) - .entity(new SO(dsInfos, dfInfos, offset, zip, compress, lockId, transferId, ip, time)) - .header("Content-Disposition", "attachment; filename=\"" + name + "\"").header("Accept-Ranges", "bytes") - .build(); + return Response.status(offset == 0 ? 
HttpURLConnection.HTTP_OK : HttpURLConnection.HTTP_PARTIAL) + .entity(new SO(dsInfos, dfInfos, offset, zip, compress, lock, transferId, ip, time)) + .header("Content-Disposition", "attachment; filename=\"" + name + "\"").header("Accept-Ranges", "bytes") + .build(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, getData failed"); + throw new DataNotOnlineException("Data is busy"); + } catch (IOException e) { + if (lock != null) { + lock.release(); + } + logger.error("I/O error " + e.getMessage()); + throw new InternalException(e.getClass() + " " + e.getMessage()); + } catch (IdsException e) { + lock.release(); + throw e; + } } public Response getData(String sessionId, String investigationIds, String datasetIds, String datafileIds, @@ -904,66 +878,73 @@ public class IdsBean { Map dsInfos = dataSelection.getDsInfo(); Set dfInfos = dataSelection.getDfInfo(); - /* - * Lock the datasets which prevents deletion of datafiles within the - * dataset and archiving of the datasets. It is important that they be - * unlocked again. - */ - - final String lockId = fsm.lock(dsInfos.keySet(), FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); - - if (twoLevel) { - checkOnlineAndFreeLockOnFailure(dsInfos.values(), dataSelection.getEmptyDatasets(), dfInfos, lockId, - FiniteStateMachine.SetLockType.ARCHIVE_AND_DELETE); - } + Lock lock = null; + try { + lock = lockManager.lock(dsInfos.values(), LockType.SHARED); - checkDatafilesPresent(dfInfos, lockId); + if (twoLevel) { + checkOnline(dsInfos.values(), dataSelection.getEmptyDatasets(), dfInfos); + } + checkDatafilesPresent(dfInfos); - final boolean finalZip = zip ? true : dataSelection.mustZip(); + final boolean finalZip = zip ? true : dataSelection.mustZip(); - /* Construct the name to include in the headers */ - String name; - if (outname == null) { - if (finalZip) { - name = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()) + ".zip"; - } else { - name = dataSelection.getDfInfo().iterator().next().getDfName(); - } - } else { - if (finalZip) { - String ext = outname.substring(outname.lastIndexOf(".") + 1, outname.length()); - if ("zip".equals(ext)) { - name = outname; + /* Construct the name to include in the headers */ + String name; + if (outname == null) { + if (finalZip) { + name = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()) + ".zip"; } else { - name = outname + ".zip"; + name = dataSelection.getDfInfo().iterator().next().getDfName(); } } else { - name = outname; + if (finalZip) { + String ext = outname.substring(outname.lastIndexOf(".") + 1, outname.length()); + if ("zip".equals(ext)) { + name = outname; + } else { + name = outname + ".zip"; + } + } else { + name = outname; + } } - } - Long transferId = null; - if (logSet.contains(CallType.READ)) { - try { - transferId = atomicLong.getAndIncrement(); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { - gen.write("transferId", transferId); - gen.write("userName", icat.getUserName(sessionId)); - addIds(gen, investigationIds, datasetIds, datafileIds); - gen.writeEnd(); + Long transferId = null; + if (logSet.contains(CallType.READ)) { + try { + transferId = atomicLong.getAndIncrement(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + gen.write("transferId", transferId); + gen.write("userName", icat.getUserName(sessionId)); + addIds(gen, investigationIds, datasetIds, 
datafileIds); + gen.writeEnd(); + } + transmitter.processMessage("getDataStart", ip, baos.toString(), start); + } catch (IcatException_Exception e) { + logger.error("Failed to prepare jms message " + e.getClass() + " " + e.getMessage()); } - transmitter.processMessage("getDataStart", ip, baos.toString(), start); - } catch (IcatException_Exception e) { - logger.error("Failed to prepare jms message " + e.getClass() + " " + e.getMessage()); } - } - return Response.status(offset == 0 ? HttpURLConnection.HTTP_OK : HttpURLConnection.HTTP_PARTIAL) - .entity(new SO(dataSelection.getDsInfo(), dataSelection.getDfInfo(), offset, finalZip, compress, lockId, - transferId, ip, start)) - .header("Content-Disposition", "attachment; filename=\"" + name + "\"").header("Accept-Ranges", "bytes") - .build(); + return Response.status(offset == 0 ? HttpURLConnection.HTTP_OK : HttpURLConnection.HTTP_PARTIAL) + .entity(new SO(dataSelection.getDsInfo(), dataSelection.getDfInfo(), offset, finalZip, compress, lock, + transferId, ip, start)) + .header("Content-Disposition", "attachment; filename=\"" + name + "\"").header("Accept-Ranges", "bytes") + .build(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, getData failed"); + throw new DataNotOnlineException("Data is busy"); + } catch (IOException e) { + if (lock != null) { + lock.release(); + } + logger.error("I/O error " + e.getMessage()); + throw new InternalException(e.getClass() + " " + e.getMessage()); + } catch (IdsException e) { + lock.release(); + throw e; + } } public String getDatafileIds(String preparedId, String ip) @@ -1103,12 +1084,15 @@ public class IdsBean { throw new BadRequestException(e.getMessage()); } } + if (datafile.getLocation() == null) { + throw new NotFoundException("Datafile not found"); + } String location = getLocation(datafile.getId(), datafile.getLocation()); + DsInfo dsInfo = new DsInfoImpl(datafile.getDataset()); - try { + try (Lock lock = lockManager.lock(dsInfo, LockType.SHARED)) { if (storageUnit == StorageUnit.DATASET) { - DsInfo dsInfo = new DsInfoImpl(datafile.getDataset()); Set mt = Collections.emptySet(); if (restoreIfOffline(dsInfo, mt)) { throw new DataNotOnlineException( @@ -1122,12 +1106,7 @@ public class IdsBean { "Before linking a datafile, it has to be restored, restoration requested automatically"); } } - } catch (IOException e) { - logger.error("I/O error " + e.getMessage() + " linking " + location + " from MainStorage"); - throw new InternalException(e.getClass() + " " + e.getMessage()); - } - try { Path target = mainStorage.getPath(location, datafile.getCreateId(), datafile.getModId()); ShellCommand sc = new ShellCommand("setfacl", "-m", "user:" + username + ":r", target.toString()); if (sc.getExitValue() != 0) { @@ -1152,6 +1131,9 @@ public class IdsBean { } return link.toString(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, getLink failed"); + throw new DataNotOnlineException("Data is busy"); } catch (IOException e) { logger.error("I/O error " + e.getMessage() + " linking " + location + " from MainStorage"); throw new InternalException(e.getClass() + " " + e.getMessage()); @@ -1220,7 +1202,10 @@ public class IdsBean { if (size == 0) { try { if (dfids.size() != 0) { - icat.get(sessionId, "Datafile", dfids.get(0)); + Datafile datafile = (Datafile) icat.get(sessionId, "Datafile", dfids.get(0)); + if (datafile.getLocation() == null) { + throw new NotFoundException("Datafile not found"); + } } if (dsids.size() != 0) { icat.get(sessionId, "Dataset", 
dsids.get(0)); @@ -1299,7 +1284,7 @@ public class IdsBean { } private long getSizeFor(String sessionId, StringBuilder sb) throws InternalException { - String query = "SELECT SUM(df.fileSize) from Datafile df WHERE df.id IN (" + sb.toString() + ")"; + String query = "SELECT SUM(df.fileSize) from Datafile df WHERE df.id IN (" + sb.toString() + ") AND df.location IS NOT NULL"; try { return (Long) icat.search(sessionId, query).get(0); } catch (IcatException_Exception e) { @@ -1310,7 +1295,7 @@ public class IdsBean { } private long evalSizeFor(String sessionId, String where, StringBuilder sb) throws InternalException { - String query = "SELECT SUM(df.fileSize) from Datafile df WHERE " + where + " IN (" + sb.toString() + ")"; + String query = "SELECT SUM(df.fileSize) from Datafile df WHERE " + where + " IN (" + sb.toString() + ") AND df.location IS NOT NULL"; logger.debug("icat query for size: {}", query); try { return (Long) icat.search(sessionId, query).get(0); @@ -1344,53 +1329,48 @@ public class IdsBean { // Do it Status status = Status.ONLINE; - try { - if (storageUnit == StorageUnit.DATASET) { - DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, - datafileIds, Returns.DATASETS); - Map dsInfos = dataSelection.getDsInfo(); + if (storageUnit == StorageUnit.DATASET) { + DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, + datafileIds, Returns.DATASETS); + Map dsInfos = dataSelection.getDsInfo(); - Set restoring = fsm.getDsRestoring(); - Set maybeOffline = fsm.getDsMaybeOffline(); - Set emptyDatasets = dataSelection.getEmptyDatasets(); - for (DsInfo dsInfo : dsInfos.values()) { - fsm.checkFailure(dsInfo.getDsId()); - if (restoring.contains(dsInfo)) { - status = Status.RESTORING; - } else if (maybeOffline.contains(dsInfo)) { - status = Status.ARCHIVED; - break; - } else if (!emptyDatasets.contains(dsInfo.getDsId()) && !mainStorage.exists(dsInfo)) { - status = Status.ARCHIVED; - break; - } + Set restoring = fsm.getDsRestoring(); + Set maybeOffline = fsm.getDsMaybeOffline(); + Set emptyDatasets = dataSelection.getEmptyDatasets(); + for (DsInfo dsInfo : dsInfos.values()) { + fsm.checkFailure(dsInfo.getDsId()); + if (restoring.contains(dsInfo)) { + status = Status.RESTORING; + } else if (maybeOffline.contains(dsInfo)) { + status = Status.ARCHIVED; + break; + } else if (!emptyDatasets.contains(dsInfo.getDsId()) && !mainStorage.exists(dsInfo)) { + status = Status.ARCHIVED; + break; } - } else if (storageUnit == StorageUnit.DATAFILE) { - DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, - datafileIds, Returns.DATAFILES); - Set dfInfos = dataSelection.getDfInfo(); + } + } else if (storageUnit == StorageUnit.DATAFILE) { + DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, + datafileIds, Returns.DATAFILES); + Set dfInfos = dataSelection.getDfInfo(); - Set restoring = fsm.getDfRestoring(); - Set maybeOffline = fsm.getDfMaybeOffline(); - for (DfInfo dfInfo : dfInfos) { - fsm.checkFailure(dfInfo.getDfId()); - if (restoring.contains(dfInfo)) { - status = Status.RESTORING; - } else if (maybeOffline.contains(dfInfo)) { - status = Status.ARCHIVED; - break; - } else if (!mainStorage.exists(dfInfo.getDfLocation())) { - status = Status.ARCHIVED; - break; - } + Set restoring = fsm.getDfRestoring(); + Set maybeOffline = fsm.getDfMaybeOffline(); + for (DfInfo dfInfo : dfInfos) { + fsm.checkFailure(dfInfo.getDfId()); + if 
(restoring.contains(dfInfo)) { + status = Status.RESTORING; + } else if (maybeOffline.contains(dfInfo)) { + status = Status.ARCHIVED; + break; + } else if (!mainStorage.exists(dfInfo.getDfLocation())) { + status = Status.ARCHIVED; + break; } - } else { - // Throw exception if selection does not exist - new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATASETS); } - } catch (IOException e) { - logger.error("I/O Exception " + e.getMessage() + " thrown"); - throw new InternalException(e.getClass() + " " + e.getMessage()); + } else { + // Throw exception if selection does not exist + new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATASETS); } logger.debug("Status is " + status.name()); @@ -1500,14 +1480,7 @@ public class IdsBean { throw new InternalException(e.getClass() + " " + e.getMessage()); } - // TODO Uncomment next line and delete subsequent five lines - // PreparedStatus status = preparedStatusMap.computeIfAbsent(preparedId, - // k -> new PreparedStatus()); - PreparedStatus nps = new PreparedStatus(); - PreparedStatus status = preparedStatusMap.putIfAbsent(preparedId, nps); - if (status == null) { - status = preparedStatusMap.get(preparedId); - } + PreparedStatus status = preparedStatusMap.computeIfAbsent(preparedId, k -> new PreparedStatus()); if (!status.lock.tryLock()) { logger.debug("Lock held for evaluation of isPrepared for preparedId {}", preparedId); @@ -1532,63 +1505,58 @@ public class IdsBean { } } - try { - if (storageUnit == StorageUnit.DATASET) { - Collection toCheck = status.fromDsElement == null ? preparedJson.dsInfos.values() - : preparedJson.dsInfos.tailMap(status.fromDsElement).values(); - logger.debug("Will check online status of {} entries", toCheck.size()); + if (storageUnit == StorageUnit.DATASET) { + Collection toCheck = status.fromDsElement == null ? preparedJson.dsInfos.values() + : preparedJson.dsInfos.tailMap(status.fromDsElement).values(); + logger.debug("Will check online status of {} entries", toCheck.size()); + for (DsInfo dsInfo : toCheck) { + fsm.checkFailure(dsInfo.getDsId()); + if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) { + prepared = false; + status.fromDsElement = dsInfo.getDsId(); + toCheck = preparedJson.dsInfos.tailMap(status.fromDsElement).values(); + logger.debug("Will check in background status of {} entries", toCheck.size()); + status.future = threadPool.submit(new RunPrepDsCheck(toCheck, preparedJson.emptyDatasets)); + break; + } + } + if (prepared) { + toCheck = status.fromDsElement == null ? Collections.emptySet() + : preparedJson.dsInfos.headMap(status.fromDsElement).values(); + logger.debug("Will check finally online status of {} entries", toCheck.size()); for (DsInfo dsInfo : toCheck) { fsm.checkFailure(dsInfo.getDsId()); if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) { prepared = false; - status.fromDsElement = dsInfo.getDsId(); - toCheck = preparedJson.dsInfos.tailMap(status.fromDsElement).values(); - logger.debug("Will check in background status of {} entries", toCheck.size()); - status.future = threadPool.submit(new RunPrepDsCheck(toCheck, preparedJson.emptyDatasets)); - break; } } - if (prepared) { - toCheck = status.fromDsElement == null ? 
Collections.emptySet() - : preparedJson.dsInfos.headMap(status.fromDsElement).values(); - logger.debug("Will check finally online status of {} entries", toCheck.size()); - for (DsInfo dsInfo : toCheck) { - fsm.checkFailure(dsInfo.getDsId()); - if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) { - prepared = false; - } - } + } + } else if (storageUnit == StorageUnit.DATAFILE) { + SortedSet toCheck = status.fromDfElement == null ? preparedJson.dfInfos + : preparedJson.dfInfos.tailSet(status.fromDfElement); + logger.debug("Will check online status of {} entries", toCheck.size()); + for (DfInfoImpl dfInfo : toCheck) { + fsm.checkFailure(dfInfo.getDfId()); + if (restoreIfOffline(dfInfo)) { + prepared = false; + status.fromDfElement = dfInfo; + toCheck = preparedJson.dfInfos.tailSet(status.fromDfElement); + logger.debug("Will check in background status of {} entries", toCheck.size()); + status.future = threadPool.submit(new RunPrepDfCheck(toCheck)); + break; } - } else if (storageUnit == StorageUnit.DATAFILE) { - SortedSet toCheck = status.fromDfElement == null ? preparedJson.dfInfos - : preparedJson.dfInfos.tailSet(status.fromDfElement); - logger.debug("Will check online status of {} entries", toCheck.size()); + } + if (prepared) { + toCheck = status.fromDfElement == null ? new TreeSet<>() + : preparedJson.dfInfos.headSet(status.fromDfElement); + logger.debug("Will check finally online status of {} entries", toCheck.size()); for (DfInfoImpl dfInfo : toCheck) { fsm.checkFailure(dfInfo.getDfId()); if (restoreIfOffline(dfInfo)) { prepared = false; - status.fromDfElement = dfInfo; - toCheck = preparedJson.dfInfos.tailSet(status.fromDfElement); - logger.debug("Will check in background status of {} entries", toCheck.size()); - status.future = threadPool.submit(new RunPrepDfCheck(toCheck)); - break; - } - } - if (prepared) { - toCheck = status.fromDfElement == null ? new TreeSet<>() - : preparedJson.dfInfos.headSet(status.fromDfElement); - logger.debug("Will check finally online status of {} entries", toCheck.size()); - for (DfInfoImpl dfInfo : toCheck) { - fsm.checkFailure(dfInfo.getDfId()); - if (restoreIfOffline(dfInfo)) { - prepared = false; - } } } } - } catch (IOException e) { - logger.error("I/O error " + e.getMessage() + " isPrepared of " + preparedId); - throw new InternalException(e.getClass() + " " + e.getMessage()); } if (logSet.contains(CallType.INFO)) { @@ -1769,44 +1737,39 @@ public class IdsBean { } DsInfo dsInfo = new DsInfoImpl(ds); - String lockId = null; - if (storageUnit == StorageUnit.DATASET) { - /* - * Lock the datasets to prevent archiving of the datasets. It is - * important that they be unlocked again. 
- */ - Set dsIds = new HashSet<>(); - dsIds.add(datasetId); - lockId = fsm.lock(dsIds, FiniteStateMachine.SetLockType.ARCHIVE); - Set dfInfos = Collections.emptySet(); - Set emptyDatasets = new HashSet<>(); - try { - List counts = icat.search(sessionId, - "COUNT(Datafile) <-> Dataset [id=" + dsInfo.getDsId() + "]"); - if ((Long) counts.get(0) == 0) { - emptyDatasets.add(dsInfo.getDsId()); - } - } catch (IcatException_Exception e) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE); - IcatExceptionType type = e.getFaultInfo().getType(); - if (type == IcatExceptionType.INSUFFICIENT_PRIVILEGES || type == IcatExceptionType.SESSION) { - throw new InsufficientPrivilegesException(e.getMessage()); - } - if (type == IcatExceptionType.NO_SUCH_OBJECT_FOUND) { - throw new NotFoundException(e.getMessage()); + try (Lock lock = lockManager.lock(dsInfo, LockType.SHARED)) { + if (storageUnit == StorageUnit.DATASET) { + Set dfInfos = Collections.emptySet(); + Set emptyDatasets = new HashSet<>(); + try { + List counts = icat.search(sessionId, + "COUNT(Datafile) <-> Dataset [id=" + dsInfo.getDsId() + "]"); + if ((Long) counts.get(0) == 0) { + emptyDatasets.add(dsInfo.getDsId()); + } + } catch (IcatException_Exception e) { + IcatExceptionType type = e.getFaultInfo().getType(); + if (type == IcatExceptionType.INSUFFICIENT_PRIVILEGES || type == IcatExceptionType.SESSION) { + throw new InsufficientPrivilegesException(e.getMessage()); + } + if (type == IcatExceptionType.NO_SUCH_OBJECT_FOUND) { + throw new NotFoundException(e.getMessage()); + } + throw new InternalException(type + " " + e.getMessage()); } - throw new InternalException(type + " " + e.getMessage()); + Set dsInfos = new HashSet<>(); + dsInfos.add(dsInfo); + checkOnline(dsInfos, emptyDatasets, dfInfos); } - Set dsInfos = new HashSet<>(); - dsInfos.add(dsInfo); - checkOnlineAndFreeLockOnFailure(dsInfos, emptyDatasets, dfInfos, lockId, - FiniteStateMachine.SetLockType.ARCHIVE); - } - try { CRC32 crc = new CRC32(); CheckedWithSizeInputStream is = new CheckedWithSizeInputStream(body, crc); - String location = mainStorage.put(dsInfo, name, is); + String location; + try { + location = mainStorage.put(dsInfo, name, is); + } catch (IllegalArgumentException e) { + throw new BadRequestException("Illegal filename or dataset: " + e.getMessage()); + } is.close(); long checksum = crc.getValue(); long size = is.getSize(); @@ -1864,14 +1827,13 @@ public class IdsBean { return Response.status(HttpURLConnection.HTTP_CREATED).entity(resp).build(); + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, put failed"); + throw new DataNotOnlineException("Data is busy"); } catch (IOException e) { logger.error("I/O exception " + e.getMessage() + " putting " + name + " to Dataset with id " + datasetIdString); throw new InternalException(e.getClass() + " " + e.getMessage()); - } finally { - if (lockId != null) { - fsm.unlock(lockId, FiniteStateMachine.SetLockType.ARCHIVE); - } } } catch (IdsException e) { @@ -2122,7 +2084,7 @@ public class IdsBean { } } - private boolean restoreIfOffline(DfInfoImpl dfInfo) throws InternalException, IOException { + private boolean restoreIfOffline(DfInfoImpl dfInfo) throws InternalException { boolean maybeOffline = false; if (fsm.getDfMaybeOffline().contains(dfInfo)) { maybeOffline = true; @@ -2133,7 +2095,7 @@ public class IdsBean { return maybeOffline; } - private boolean restoreIfOffline(DsInfo dsInfo, Set emptyDatasets) throws InternalException, IOException { + private boolean restoreIfOffline(DsInfo dsInfo, 
Set emptyDatasets) throws InternalException { boolean maybeOffline = false; if (fsm.getDsMaybeOffline().contains(dsInfo)) { maybeOffline = true; @@ -2143,4 +2105,77 @@ public class IdsBean { } return maybeOffline; } + + public void write(String sessionId, String investigationIds, String datasetIds, String datafileIds, String ip) + throws BadRequestException, InsufficientPrivilegesException, InternalException, NotFoundException, + DataNotOnlineException { + + long start = System.currentTimeMillis(); + + // Log and validate + logger.info("New webservice request: write " + "investigationIds='" + investigationIds + "' " + "datasetIds='" + + datasetIds + "' " + "datafileIds='" + datafileIds + "'"); + + validateUUID("sessionId", sessionId); + + final DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, + datafileIds, Returns.DATASETS_AND_DATAFILES); + + // Do it + Map dsInfos = dataSelection.getDsInfo(); + Set dfInfos = dataSelection.getDfInfo(); + + try (Lock lock = lockManager.lock(dsInfos.values(), LockType.SHARED)) { + if (twoLevel) { + boolean maybeOffline = false; + if (storageUnit == StorageUnit.DATASET) { + for (DsInfo dsInfo : dsInfos.values()) { + if (!dataSelection.getEmptyDatasets().contains(dsInfo.getDsId()) && + !mainStorage.exists(dsInfo)) { + maybeOffline = true; + } + } + } else if (storageUnit == StorageUnit.DATAFILE) { + for (DfInfoImpl dfInfo : dfInfos) { + if (!mainStorage.exists(dfInfo.getDfLocation())) { + maybeOffline = true; + } + } + } + if (maybeOffline) { + throw new DataNotOnlineException("Requested data is not online, write request refused"); + } + } + + if (storageUnit == StorageUnit.DATASET) { + for (DsInfo dsInfo : dsInfos.values()) { + fsm.queue(dsInfo, DeferredOp.WRITE); + } + } else if (storageUnit == StorageUnit.DATAFILE) { + for (DfInfoImpl dfInfo : dfInfos) { + fsm.queue(dfInfo, DeferredOp.WRITE); + } + } + } catch (AlreadyLockedException e) { + logger.debug("Could not acquire lock, write failed"); + throw new DataNotOnlineException("Data is busy"); + } catch (IOException e) { + logger.error("I/O error " + e.getMessage() + " writing"); + throw new InternalException(e.getClass() + " " + e.getMessage()); + } + + if (logSet.contains(CallType.MIGRATE)) { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) { + gen.write("userName", icat.getUserName(sessionId)); + addIds(gen, investigationIds, datasetIds, datafileIds); + gen.writeEnd(); + } + transmitter.processMessage("write", ip, baos.toString(), start); + } catch (IcatException_Exception e) { + logger.error("Failed to prepare jms message " + e.getClass() + " " + e.getMessage()); + } + } + } } diff --git a/src/main/java/org/icatproject/ids/IdsService.java b/src/main/java/org/icatproject/ids/IdsService.java index 063e52880e7d8feecb96b4e4deadaf2c3686e860..3b976a9393ff3e380db189ea1247f6a5d9353347 100644 --- a/src/main/java/org/icatproject/ids/IdsService.java +++ b/src/main/java/org/icatproject/ids/IdsService.java @@ -796,4 +796,40 @@ public class IdsService { idsBean.restore(sessionId, investigationIds, datasetIds, datafileIds, request.getRemoteAddr()); } + + /** + * Write data specified by the investigationIds, datasetIds + * and datafileIds specified along with a sessionId to archive + * storage. If two level storage is not in use this has no + * effect. + * + * @summary write + * + * @param sessionId + * A sessionId returned by a call to the icat server. 
+ * @param investigationIds + * If present, a comma separated list of investigation id values + * @param datasetIds + * If present, a comma separated list of data set id values or + * null + * @param datafileIds + * If present, a comma separated list of datafile id values. + * + * @throws BadRequestException + * @throws InsufficientPrivilegesException + * @throws InternalException + * @throws NotFoundException + * + * @statuscode 200 To indicate success + */ + @POST + @Path("write") + @Consumes(MediaType.APPLICATION_FORM_URLENCODED) + public void write(@Context HttpServletRequest request, @FormParam("sessionId") String sessionId, + @FormParam("investigationIds") String investigationIds, @FormParam("datasetIds") String datasetIds, + @FormParam("datafileIds") String datafileIds) + throws BadRequestException, InsufficientPrivilegesException, InternalException, NotFoundException, + DataNotOnlineException { + idsBean.write(sessionId, investigationIds, datasetIds, datafileIds, request.getRemoteAddr()); + } } \ No newline at end of file diff --git a/src/main/java/org/icatproject/ids/LockManager.java b/src/main/java/org/icatproject/ids/LockManager.java new file mode 100644 index 0000000000000000000000000000000000000000..4f1a675c5461c90a2a7f86751c608ed74dcbb4e7 --- /dev/null +++ b/src/main/java/org/icatproject/ids/LockManager.java @@ -0,0 +1,179 @@ +package org.icatproject.ids; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.PostConstruct; +import javax.ejb.Singleton; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.icatproject.ids.plugin.AlreadyLockedException; +import org.icatproject.ids.plugin.DsInfo; +import org.icatproject.ids.plugin.MainStorageInterface; + +@Singleton +public class LockManager { + + public enum LockType { + SHARED, EXCLUSIVE + } + + public class LockInfo { + public final Long id; + public final LockType type; + public final int count; + + LockInfo(LockEntry le) { + this.id = le.id; + this.type = le.type; + this.count = le.count; + } + } + + private class LockEntry { + final Long id; + final LockType type; + int count; + + LockEntry(Long id, LockType type) { + this.id = id; + this.type = type; + this.count = 0; + lockMap.put(id, this); + } + + void inc() { + count += 1; + } + + void dec() { + assert count > 0; + count -= 1; + if (count == 0) { + lockMap.remove(id); + } + } + } + + /** + * Define the common interface of SingleLock and LockCollection + */ + public abstract class Lock implements AutoCloseable { + public abstract void release(); + + public void close() { + release(); + } + } + + private class SingleLock extends Lock { + private final Long id; + private boolean isValid; + private AutoCloseable storageLock; + SingleLock(Long id, AutoCloseable storageLock) { + this.id = id; + this.isValid = true; + this.storageLock = storageLock; + } + + public void release() { + synchronized (lockMap) { + if (isValid) { + lockMap.get(id).dec(); + isValid = false; + if (storageLock != null) { + try { + storageLock.close(); + } catch (Exception e) { + logger.error("Error while closing lock on {} in the storage plugin: {}.", id, e.getMessage()); + } + } + logger.debug("Released a lock on {}.", id); + } + } + } + } + + private class LockCollection extends Lock { + private ArrayList locks; + + LockCollection() { + locks = new ArrayList<>(); + } + + void add(Lock l) { + locks.add(l); + } + + public void release() { + for (Lock l : locks) { + 
l.release(); + } + } + } + + private static Logger logger = LoggerFactory.getLogger(LockManager.class); + private PropertyHandler propertyHandler; + private MainStorageInterface mainStorage; + private Map lockMap = new HashMap<>(); + + @PostConstruct + private void init() { + propertyHandler = PropertyHandler.getInstance(); + mainStorage = propertyHandler.getMainStorage(); + logger.debug("LockManager initialized."); + } + + public Lock lock(DsInfo ds, LockType type) throws AlreadyLockedException, IOException { + Long id = ds.getDsId(); + assert id != null; + synchronized (lockMap) { + LockEntry le = lockMap.get(id); + if (le == null) { + le = new LockEntry(id, type); + } else { + if (type == LockType.EXCLUSIVE || le.type == LockType.EXCLUSIVE) { + throw new AlreadyLockedException(); + } + } + le.inc(); + AutoCloseable storageLock; + try { + storageLock = mainStorage.lock(ds, type == LockType.SHARED); + } catch (AlreadyLockedException | IOException e) { + le.dec(); + throw e; + } + logger.debug("Acquired a {} lock on {}.", type, id); + return new SingleLock(id, storageLock); + } + } + + public Lock lock(Collection datasets, LockType type) throws AlreadyLockedException, IOException { + LockCollection locks = new LockCollection(); + try { + for (DsInfo ds : datasets) { + locks.add(lock(ds, type)); + } + } catch (AlreadyLockedException | IOException e) { + locks.release(); + throw e; + } + return locks; + } + + public Collection getLockInfo() { + Collection lockInfo = new ArrayList<>(); + synchronized (lockMap) { + for (LockEntry le : lockMap.values()) { + lockInfo.add(new LockInfo(le)); + } + return lockInfo; + } + } + +} \ No newline at end of file diff --git a/src/main/java/org/icatproject/ids/PropertyHandler.java b/src/main/java/org/icatproject/ids/PropertyHandler.java index 1eddc4dc3e2a2eb3c6d57e292f7032aa8b778f20..387813bb663ea32202c1e0bb2e07e2d11b5adde9 100644 --- a/src/main/java/org/icatproject/ids/PropertyHandler.java +++ b/src/main/java/org/icatproject/ids/PropertyHandler.java @@ -68,7 +68,8 @@ public class PropertyHandler { private long startArchivingLevel; private long stopArchivingLevel; private StorageUnit storageUnit; - private long writeDelaySeconds; + private long delayDatasetWrites; + private long delayDatafileOperations; private ZipMapperInterface zipMapper; private int tidyBlockSize; private String key; @@ -145,8 +146,6 @@ public class PropertyHandler { if (!props.has("plugin.archive.class")) { logger.info("Property plugin.archive.class not set, single storage enabled."); } else { - writeDelaySeconds = props.getPositiveLong("writeDelaySeconds"); - try { Class klass = (Class) Class .forName(props.getString("plugin.archive.class")); @@ -174,6 +173,23 @@ public class PropertyHandler { } abort("storageUnit value " + props.getString("storageUnit") + " must be taken from " + vs); } + if (storageUnit == StorageUnit.DATASET) { + if (!props.has("delayDatasetWritesSeconds") && props.has("writeDelaySeconds")) { + // compatibility mode + logger.warn("writeDelaySeconds is deprecated, please use delayDatasetWritesSeconds instead"); + delayDatasetWrites = props.getPositiveLong("writeDelaySeconds"); + } else { + delayDatasetWrites = props.getPositiveLong("delayDatasetWritesSeconds"); + } + } else if (storageUnit == StorageUnit.DATAFILE) { + if (!props.has("delayDatafileOperationsSeconds") && props.has("writeDelaySeconds")) { + // compatibility mode + logger.warn("writeDelaySeconds is deprecated, please use delayDatafileOperationsSeconds instead"); + delayDatafileOperations = 
props.getPositiveLong("writeDelaySeconds"); + } else { + delayDatafileOperations = props.getPositiveLong("delayDatafileOperationsSeconds"); + } + } tidyBlockSize = props.getPositiveInt("tidyBlockSize"); } @@ -375,8 +391,12 @@ public class PropertyHandler { return tidyBlockSize; } - public long getWriteDelaySeconds() { - return writeDelaySeconds; + public long getDelayDatasetWrites() { + return delayDatasetWrites; + } + + public long getDelayDatafileOperations() { + return delayDatafileOperations; } public ZipMapperInterface getZipMapper() { diff --git a/src/main/java/org/icatproject/ids/Tidier.java b/src/main/java/org/icatproject/ids/Tidier.java index 3d7cb2dd2d1a5a12e485e273ec210c90a3c9e531..12170ddfc21f5e024e20d24b34eb32cd00d9f4d4 100644 --- a/src/main/java/org/icatproject/ids/Tidier.java +++ b/src/main/java/org/icatproject/ids/Tidier.java @@ -170,36 +170,36 @@ public class Tidier { } } - private boolean addStringConstraint(StringBuilder sb, String var, String value, boolean andNeeded) { - if (value != null) { - if (andNeeded) { - sb.append(" AND "); - } else { - sb.append(" "); - andNeeded = true; - } - sb.append(var + " = '" + value + "'"); + } + + private final static Logger logger = LoggerFactory.getLogger(Tidier.class); + + static boolean addStringConstraint(StringBuilder sb, String var, String value, boolean andNeeded) { + if (value != null) { + if (andNeeded) { + sb.append(" AND "); + } else { + sb.append(" "); + andNeeded = true; } - return andNeeded; + sb.append(var + " = '" + value.replaceAll("'", "''") + "'"); } + return andNeeded; + } - private boolean addNumericConstraint(StringBuilder sb, String var, Long value, boolean andNeeded) { - if (value != null) { - if (andNeeded) { - sb.append(" AND "); - } else { - sb.append(" "); - andNeeded = true; - } - sb.append(var + " = " + value); + static boolean addNumericConstraint(StringBuilder sb, String var, Long value, boolean andNeeded) { + if (value != null) { + if (andNeeded) { + sb.append(" AND "); + } else { + sb.append(" "); + andNeeded = true; } - return andNeeded; + sb.append(var + " = " + value); } - + return andNeeded; } - private final static Logger logger = LoggerFactory.getLogger(Tidier.class);; - static void cleanPreparedDir(Path preparedDir, int preparedCount) throws IOException { Map dateMap = new HashMap<>(); diff --git a/src/main/java/org/icatproject/ids/Transmitter.java b/src/main/java/org/icatproject/ids/Transmitter.java index cfc954476a76e940346bd9a28cfe52bfa24c6e81..8668600aa66299d30fa8a5436ec9cbb594fa555f 100644 --- a/src/main/java/org/icatproject/ids/Transmitter.java +++ b/src/main/java/org/icatproject/ids/Transmitter.java @@ -38,7 +38,7 @@ public class Transmitter { .lookup(propertyHandler.getJmsTopicConnectionFactory()); topicConnection = topicConnectionFactory.createTopicConnection(); topic = (Topic) ic.lookup("jms/IDS/log"); - logger.info("Transmitter created"); + logger.info("Notification Transmitter created"); } catch (JMSException | NamingException e) { logger.error(fatal, "Problem with JMS " + e); throw new IllegalStateException(e.getMessage()); @@ -52,15 +52,14 @@ public class Transmitter { if (topicConnection != null) { topicConnection.close(); } - logger.info("Transmitter closing down"); + logger.info("Notification Transmitter closing down"); } catch (JMSException e) { throw new IllegalStateException(e.getMessage()); } } public void processMessage(String operation, String ip, String body, long startMillis) { - try { - Session jmsSession = topicConnection.createSession(false, 
Session.AUTO_ACKNOWLEDGE); + try (Session jmsSession = topicConnection.createSession(false, Session.AUTO_ACKNOWLEDGE)) { TextMessage jmsg = jmsSession.createTextMessage(body); jmsg.setStringProperty("operation", operation); jmsg.setStringProperty("ip", ip); @@ -69,7 +68,6 @@ public class Transmitter { MessageProducer jmsProducer = jmsSession.createProducer(topic); jmsProducer.send(jmsg); logger.debug("Sent jms message " + operation + " " + ip); - jmsSession.close(); } catch (JMSException e) { logger.error("Failed to send jms message " + operation + " " + ip); } diff --git a/src/main/java/org/icatproject/ids/exceptions/DataNotOnlineException.java b/src/main/java/org/icatproject/ids/exceptions/DataNotOnlineException.java index 8084f9f41f7e18d4014afdc93015c31d56602a10..bed9204d098261ccf427ee3a8cc3b5baf787d308 100644 --- a/src/main/java/org/icatproject/ids/exceptions/DataNotOnlineException.java +++ b/src/main/java/org/icatproject/ids/exceptions/DataNotOnlineException.java @@ -6,7 +6,7 @@ import java.net.HttpURLConnection; public class DataNotOnlineException extends IdsException { public DataNotOnlineException(String msg) { - super(HttpURLConnection.HTTP_NOT_FOUND, msg); + super(HttpURLConnection.HTTP_UNAVAILABLE, msg); } } diff --git a/src/main/java/org/icatproject/ids/thread/DfArchiver.java b/src/main/java/org/icatproject/ids/thread/DfArchiver.java index 7be029917348561299296562f751db090d36a067..03507de89e743613b18092daf243bdaf041c29e9 100644 --- a/src/main/java/org/icatproject/ids/thread/DfArchiver.java +++ b/src/main/java/org/icatproject/ids/thread/DfArchiver.java @@ -2,9 +2,11 @@ package org.icatproject.ids.thread; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collection; import java.util.List; import org.icatproject.ids.FiniteStateMachine; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.DfInfo; import org.icatproject.ids.plugin.MainStorageInterface; @@ -21,30 +23,38 @@ public class DfArchiver implements Runnable { private FiniteStateMachine fsm; private List dfInfos; private Path markerDir; + private Collection locks; - public DfArchiver(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm) { + public DfArchiver(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm, Collection locks) { this.dfInfos = dfInfos; this.fsm = fsm; + this.locks = locks; mainStorageInterface = propertyHandler.getMainStorage(); markerDir = propertyHandler.getCacheDir().resolve("marker"); } @Override public void run() { - for (DfInfo dfInfo : dfInfos) { - try { - if (Files.exists(markerDir.resolve(Long.toString(dfInfo.getDfId())))) { - logger.error("Archive of " + dfInfo - + " not carried out because a write to secondary storage operation failed previously"); - } else { - String dfLocation = dfInfo.getDfLocation(); - mainStorageInterface.delete(dfLocation, dfInfo.getCreateId(), dfInfo.getModId()); - logger.debug("Archive of " + dfInfo + " completed"); + try { + for (DfInfo dfInfo : dfInfos) { + try { + if (Files.exists(markerDir.resolve(Long.toString(dfInfo.getDfId())))) { + logger.error("Archive of " + dfInfo + + " not carried out because a write to secondary storage operation failed previously"); + } else { + String dfLocation = dfInfo.getDfLocation(); + mainStorageInterface.delete(dfLocation, dfInfo.getCreateId(), dfInfo.getModId()); + logger.debug("Archive of " + dfInfo + " completed"); + } + } catch (Exception e) { + logger.error("Archive of " + dfInfo + " failed due to " + 
e.getClass() + " " + e.getMessage()); + } finally { + fsm.removeFromChanging(dfInfo); } - } catch (Exception e) { - logger.error("Archive of " + dfInfo + " failed due to " + e.getClass() + " " + e.getMessage()); - } finally { - fsm.removeFromChanging(dfInfo); + } + } finally { + for (Lock l: locks) { + l.release(); } } } diff --git a/src/main/java/org/icatproject/ids/thread/DfDeleter.java b/src/main/java/org/icatproject/ids/thread/DfDeleter.java index b69ba595218c243f6ed873c4c47a41186ed55f58..da172d2a06c840e4204a07264fa05629e3af9642 100644 --- a/src/main/java/org/icatproject/ids/thread/DfDeleter.java +++ b/src/main/java/org/icatproject/ids/thread/DfDeleter.java @@ -1,8 +1,10 @@ package org.icatproject.ids.thread; +import java.util.Collection; import java.util.List; import org.icatproject.ids.FiniteStateMachine; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DfInfo; @@ -17,30 +19,35 @@ public class DfDeleter implements Runnable { private final static Logger logger = LoggerFactory.getLogger(DfDeleter.class); private FiniteStateMachine fsm; - private ArchiveStorageInterface archiveStorageInterface; - private List dfInfos; + private Collection locks; - public DfDeleter(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm) { + public DfDeleter(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm, Collection locks) { this.dfInfos = dfInfos; this.fsm = fsm; + this.locks = locks; archiveStorageInterface = propertyHandler.getArchiveStorage(); } @Override public void run() { - for (DfInfo dfInfo : dfInfos) { - try { - String dfLocation = dfInfo.getDfLocation(); - archiveStorageInterface.delete(dfLocation); - logger.debug("Delete of " + dfInfo + " completed"); - } catch (Exception e) { - logger.error("Delete of " + dfInfo + " failed due to " + e.getClass() + " " + e.getMessage()); - } finally { - fsm.removeFromChanging(dfInfo); + try { + for (DfInfo dfInfo : dfInfos) { + try { + String dfLocation = dfInfo.getDfLocation(); + archiveStorageInterface.delete(dfLocation); + logger.debug("Delete of " + dfInfo + " completed"); + } catch (Exception e) { + logger.error("Delete of " + dfInfo + " failed due to " + e.getClass() + " " + e.getMessage()); + } finally { + fsm.removeFromChanging(dfInfo); + } + } + } finally { + for (Lock l: locks) { + l.release(); } } - } } diff --git a/src/main/java/org/icatproject/ids/thread/DfRestorer.java b/src/main/java/org/icatproject/ids/thread/DfRestorer.java index 39aa6652334d86af5fca6f88f50a12dc3e513293..e0948dd62b98da58cdd2cb46c13b4ddb92b4f129 100644 --- a/src/main/java/org/icatproject/ids/thread/DfRestorer.java +++ b/src/main/java/org/icatproject/ids/thread/DfRestorer.java @@ -1,11 +1,13 @@ package org.icatproject.ids.thread; import java.io.IOException; +import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Set; import org.icatproject.ids.FiniteStateMachine; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DfInfo; @@ -23,12 +25,13 @@ public class DfRestorer implements Runnable { private MainStorageInterface mainStorageInterface; private ArchiveStorageInterface archiveStorageInterface; private FiniteStateMachine fsm; - private List dfInfos; + private Collection locks; - public DfRestorer(List dfInfos, PropertyHandler propertyHandler, 
FiniteStateMachine fsm) { + public DfRestorer(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm, Collection locks) { this.dfInfos = dfInfos; this.fsm = fsm; + this.locks = locks; mainStorageInterface = propertyHandler.getMainStorage(); archiveStorageInterface = propertyHandler.getArchiveStorage(); @@ -42,26 +45,21 @@ public class DfRestorer implements Runnable { */ @Override public void run() { - /* - * This code avoids unnecessary calls to restore files. It will not - * generally remove anything from the list of files to restore as - * pointless restores are normally filtered out earlier. - */ - Iterator iter = dfInfos.iterator(); - while (iter.hasNext()) { - DfInfo dfInfo = iter.next(); - try { + try { + /* + * This code avoids unnecessary calls to restore files. It will not + * generally remove anything from the list of files to restore as + * pointless restores are normally filtered out earlier. + */ + Iterator iter = dfInfos.iterator(); + while (iter.hasNext()) { + DfInfo dfInfo = iter.next(); if (mainStorageInterface.exists(dfInfo.getDfLocation())) { iter.remove(); fsm.removeFromChanging(dfInfo); } - } catch (IOException e) { - logger.error("Check on existence of {} failed with {} {}", dfInfo.getDfLocation(), e.getClass(), - e.getMessage()); } - } - try { Set failures = archiveStorageInterface.restore(mainStorageInterface, dfInfos); for (DfInfo dfInfo : dfInfos) { if (failures.contains(dfInfo)) { @@ -73,13 +71,15 @@ public class DfRestorer implements Runnable { } fsm.removeFromChanging(dfInfo); } - } catch (IOException e) { + } catch (Exception e) { for (DfInfo dfInfo : dfInfos) { logger.error("Restore of " + dfInfo + " failed " + e.getClass() + " " + e.getMessage()); fsm.removeFromChanging(dfInfo); } - return; + } finally { + for (Lock l: locks) { + l.release(); + } } - } -} \ No newline at end of file +} diff --git a/src/main/java/org/icatproject/ids/thread/DfWriter.java b/src/main/java/org/icatproject/ids/thread/DfWriter.java index e6a05074ebaee52c20d23680b701c9422d3f9a89..bc63c24ca04187645b58a067a18b66fbf829eee5 100644 --- a/src/main/java/org/icatproject/ids/thread/DfWriter.java +++ b/src/main/java/org/icatproject/ids/thread/DfWriter.java @@ -3,9 +3,11 @@ package org.icatproject.ids.thread; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collection; import java.util.List; import org.icatproject.ids.FiniteStateMachine; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DfInfo; @@ -23,14 +25,14 @@ public class DfWriter implements Runnable { private FiniteStateMachine fsm; private MainStorageInterface mainStorageInterface; private ArchiveStorageInterface archiveStorageInterface; - private Path markerDir; - private List dfInfos; + private Collection locks; - public DfWriter(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm) { + public DfWriter(List dfInfos, PropertyHandler propertyHandler, FiniteStateMachine fsm, Collection locks) { this.dfInfos = dfInfos; this.fsm = fsm; + this.locks = locks; mainStorageInterface = propertyHandler.getMainStorage(); archiveStorageInterface = propertyHandler.getArchiveStorage(); markerDir = propertyHandler.getCacheDir().resolve("marker"); @@ -38,20 +40,25 @@ public class DfWriter implements Runnable { @Override public void run() { - for (DfInfo dfInfo : dfInfos) { - String dfLocation = dfInfo.getDfLocation(); - try (InputStream is 
= mainStorageInterface.get(dfLocation, dfInfo.getCreateId(), dfInfo.getModId())) { - archiveStorageInterface.put(is, dfLocation); - Path marker = markerDir.resolve(Long.toString(dfInfo.getDfId())); - Files.deleteIfExists(marker); - logger.debug("Removed marker " + marker); - logger.debug("Write of " + dfInfo + " completed"); - } catch (Exception e) { - logger.error("Write of " + dfInfo + " failed due to " + e.getClass() + " " + e.getMessage()); - } finally { - fsm.removeFromChanging(dfInfo); + try { + for (DfInfo dfInfo : dfInfos) { + String dfLocation = dfInfo.getDfLocation(); + try (InputStream is = mainStorageInterface.get(dfLocation, dfInfo.getCreateId(), dfInfo.getModId())) { + archiveStorageInterface.put(is, dfLocation); + Path marker = markerDir.resolve(Long.toString(dfInfo.getDfId())); + Files.deleteIfExists(marker); + logger.debug("Removed marker " + marker); + logger.debug("Write of " + dfInfo + " completed"); + } catch (Exception e) { + logger.error("Write of " + dfInfo + " failed due to " + e.getClass() + " " + e.getMessage()); + } finally { + fsm.removeFromChanging(dfInfo); + } + } + } finally { + for (Lock l: locks) { + l.release(); } } - } } diff --git a/src/main/java/org/icatproject/ids/thread/DsArchiver.java b/src/main/java/org/icatproject/ids/thread/DsArchiver.java index 284fbe12afbeef28fdf66fd605429b4ab23602b2..4e128a5b2a34077e79f25243c35e0026eb77ac3c 100644 --- a/src/main/java/org/icatproject/ids/thread/DsArchiver.java +++ b/src/main/java/org/icatproject/ids/thread/DsArchiver.java @@ -4,6 +4,7 @@ import java.nio.file.Files; import java.nio.file.Path; import org.icatproject.ids.FiniteStateMachine; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.DsInfo; import org.icatproject.ids.plugin.MainStorageInterface; @@ -20,12 +21,14 @@ public class DsArchiver implements Runnable { private MainStorageInterface mainStorageInterface; private FiniteStateMachine fsm; private Path markerDir; + private Lock lock; - public DsArchiver(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm) { + public DsArchiver(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, Lock lock) { this.dsInfo = dsInfo; this.fsm = fsm; mainStorageInterface = propertyHandler.getMainStorage(); markerDir = propertyHandler.getCacheDir().resolve("marker"); + this.lock = lock; } @Override @@ -42,6 +45,7 @@ public class DsArchiver implements Runnable { logger.error("Archive of " + dsInfo + " failed due to " + e.getMessage()); } finally { fsm.removeFromChanging(dsInfo); + lock.release(); } } } diff --git a/src/main/java/org/icatproject/ids/thread/DsRestorer.java b/src/main/java/org/icatproject/ids/thread/DsRestorer.java index c6b175fe667b9a43a0370ec4222503df55392556..d3bd52b53381df10fe0ca40f7184cec7e27217b6 100644 --- a/src/main/java/org/icatproject/ids/thread/DsRestorer.java +++ b/src/main/java/org/icatproject/ids/thread/DsRestorer.java @@ -4,8 +4,10 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; @@ -14,6 +16,7 @@ import org.icatproject.Dataset; import org.icatproject.ids.FiniteStateMachine; import org.icatproject.ids.IcatReader; import org.icatproject.ids.IdsBean; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import 
org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DsInfo; @@ -40,8 +43,9 @@ public class DsRestorer implements Runnable { private IcatReader reader; private ZipMapperInterface zipMapper; + private Lock lock; - public DsRestorer(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, IcatReader reader) { + public DsRestorer(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, IcatReader reader, Lock lock) { this.dsInfo = dsInfo; this.fsm = fsm; zipMapper = propertyHandler.getZipMapper(); @@ -49,6 +53,7 @@ public class DsRestorer implements Runnable { archiveStorageInterface = propertyHandler.getArchiveStorage(); datasetCache = propertyHandler.getCacheDir().resolve("dataset"); this.reader = reader; + this.lock = lock; } @Override @@ -59,13 +64,8 @@ public class DsRestorer implements Runnable { * generally do anything as pointless restores are normally filtered * out earlier. */ - try { - if (mainStorageInterface.exists(dsInfo)) { - return; - } - } catch (IOException e) { - logger.error("Check on existence of {} failed with {} {}", dsInfo.getDsLocation(), e.getClass(), - e.getMessage()); + if (mainStorageInterface.exists(dsInfo)) { + return; } long size = 0; @@ -74,6 +74,9 @@ public class DsRestorer implements Runnable { .getDatafiles(); Map nameToLocalMap = new HashMap<>(datafiles.size()); for (Datafile datafile : datafiles) { + if (datafile.getLocation() == null) { + continue; + } nameToLocalMap.put(datafile.getName(), IdsBean.getLocation(datafile.getId(), datafile.getLocation())); size += datafile.getFileSize(); n++; @@ -91,25 +94,37 @@ public class DsRestorer implements Runnable { + " files of total size " + size); ZipInputStream zis = new ZipInputStream(Files.newInputStream(datasetCachePath)); ZipEntry ze = zis.getNextEntry(); + Set seen = new HashSet<>(); while (ze != null) { String dfName = zipMapper.getFileName(ze.getName()); + if (seen.contains(dfName)) { + throw new RuntimeException("Corrupt archive for " + dsInfo + ": duplicate entry " + dfName); + } String location = nameToLocalMap.get(dfName); if (location == null) { - logger.error("Unable to store " + dfName + " into " + dsInfo + " as no location found"); - } else { - mainStorageInterface.put(zis, location); + throw new RuntimeException("Corrupt archive for " + dsInfo + ": spurious entry " + dfName); } + mainStorageInterface.put(zis, location); ze = zis.getNextEntry(); + seen.add(dfName); } zis.close(); + if (!seen.equals(nameToLocalMap.keySet())) { + throw new RuntimeException("Corrupt archive for " + dsInfo + ": missing entries"); + } Files.delete(datasetCachePath); fsm.recordSuccess(dsInfo.getDsId()); logger.debug("Restore of " + dsInfo + " completed"); } catch (Exception e) { fsm.recordFailure(dsInfo.getDsId()); logger.error("Restore of " + dsInfo + " failed due to " + e.getClass() + " " + e.getMessage()); + try { + mainStorageInterface.delete(dsInfo); + } catch (IOException e2) { + } } finally { fsm.removeFromChanging(dsInfo); + lock.release(); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/icatproject/ids/thread/DsWriter.java b/src/main/java/org/icatproject/ids/thread/DsWriter.java index 3250adb9e9d3160783bfd55d1894c2ccdb78ed01..9a2a0b49316bb22ccdc32c96fa4312a4daaf23f3 100644 --- a/src/main/java/org/icatproject/ids/thread/DsWriter.java +++ b/src/main/java/org/icatproject/ids/thread/DsWriter.java @@ -15,6 +15,7 @@ import org.icatproject.ids.DfInfoImpl; import org.icatproject.ids.FiniteStateMachine; import 
org.icatproject.ids.IcatReader; import org.icatproject.ids.IdsBean; +import org.icatproject.ids.LockManager.Lock; import org.icatproject.ids.PropertyHandler; import org.icatproject.ids.plugin.ArchiveStorageInterface; import org.icatproject.ids.plugin.DsInfo; @@ -39,8 +40,9 @@ public class DsWriter implements Runnable { private Path markerDir; private IcatReader reader; private ZipMapperInterface zipMapper; + private Lock lock; - public DsWriter(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, IcatReader reader) { + public DsWriter(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, IcatReader reader, Lock lock) { this.dsInfo = dsInfo; this.fsm = fsm; this.zipMapper = propertyHandler.getZipMapper(); @@ -49,6 +51,7 @@ public class DsWriter implements Runnable { datasetCache = propertyHandler.getCacheDir().resolve("dataset"); markerDir = propertyHandler.getCacheDir().resolve("marker"); this.reader = reader; + this.lock = lock; } @Override @@ -66,6 +69,9 @@ public class DsWriter implements Runnable { ZipOutputStream zos = new ZipOutputStream( Files.newOutputStream(datasetCachePath, StandardOpenOption.CREATE)); for (Datafile datafile : datafiles) { + if (datafile.getLocation() == null) { + continue; + } String location = IdsBean.getLocation(datafile.getId(), datafile.getLocation()); InputStream is = null; try { @@ -101,6 +107,7 @@ public class DsWriter implements Runnable { logger.error("Write of " + dsInfo + " failed due to " + e.getClass() + " " + e.getMessage()); } finally { fsm.removeFromChanging(dsInfo); + lock.release(); } } } diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml index 27196c82c43eaeba313150edc854717d225c7525..21bb62a00b6b7426ffb6d6d0b5e394ee3ba48564 100644 --- a/src/main/resources/logback.xml +++ b/src/main/resources/logback.xml @@ -14,12 +14,12 @@ - %date %level [%thread] %C{0} %L - %msg%n + %date %level [%thread] %C{0} - %msg%n - + diff --git a/src/main/resources/run.properties b/src/main/resources/run.properties deleted file mode 100644 index fc1e19cc92b9a3ef757bad125debc599e5665b76..0000000000000000000000000000000000000000 --- a/src/main/resources/run.properties +++ /dev/null @@ -1,31 +0,0 @@ -icat.url = ${serverUrl} - -plugin.zipMapper.class = org.icatproject.ids.storage_test.ZipMapper - -plugin.main.class = org.icatproject.ids.storage_test.MainFileStorage -plugin.main.dir = ${HOME}/ids/main/ - -cache.dir = ${HOME}/ids/cache -preparedCount = 10000 -processQueueIntervalSeconds = 5 -rootUserNames = root -sizeCheckIntervalSeconds = 60 -reader = db username root password password -maxIdsInQuery = 1000 - -plugin.archive.class = org.icatproject.ids.storage_test.ArchiveFileStorage -plugin.archive.dir = ${HOME}/ids/archive/ -writeDelaySeconds = 60 -startArchivingLevel1024bytes = 5000000 -stopArchivingLevel1024bytes = 4000000 -storageUnit = dataset -tidyBlockSize = 500 - -filesCheck.parallelCount = 0 -filesCheck.gapSeconds = 5 -filesCheck.lastIdFile = ${HOME}/ids/lastIdFile -filesCheck.errorLog = ${HOME}/ids/errorLog - -linkLifetimeSeconds = 3600 - -log.list = READ WRITE diff --git a/src/main/scripts/setup b/src/main/scripts/setup index a2d98247010eeb197db4e20793033c5f391de2ba..28d138e9b033987053fd60e4003dc0c49072fca0 100755 --- a/src/main/scripts/setup +++ b/src/main/scripts/setup @@ -30,10 +30,18 @@ if arg == "INSTALL": abort("Please create directory " + idsProperties.get("cache.dir") + " as specified in run.properties") if idsProperties.get("plugin.archive.class"): - if not 
idsProperties.get("writeDelaySeconds"): abort("writeDelaySeconds is not set in run.properties") if not idsProperties.get("startArchivingLevel1024bytes"): abort("startArchivingLevel1024bytes is not set in run.properties") if not idsProperties.get("stopArchivingLevel1024bytes"): abort("stopArchivingLevel1024bytes is not set in run.properties") if not idsProperties.get("tidyBlockSize"): abort("tidyBlockSize is not set in ids.properties") + if not idsProperties.get("storageUnit"): abort("storageUnit is not set in run.properties") + if idsProperties["storageUnit"].lower == "dataset": + if not (idsProperties.get("delayDatasetWritesSeconds") or + idsProperties.get("writeDelaySeconds")): + abort("delayDatasetWritesSeconds is not set in run.properties") + if idsProperties["storageUnit"].lower == "datafile": + if not (idsProperties.get("delayDatafileOperationsSeconds") or + idsProperties.get("writeDelaySeconds")): + abort("delayDatafileOperationsSeconds is not set in run.properties") if int(idsProperties["filesCheck.parallelCount"]): if not idsProperties.get("filesCheck.gapSeconds"): abort("filesCheck.gapSeconds is not set in run.properties") diff --git a/src/site/xhtml/installation.xhtml.vm b/src/site/xhtml/installation.xhtml.vm index 63aa346477cc9073adeeaae22180d2ce139e0af0..d3958375e6f59fbce63aabd98022f2a0ef879da7 100644 --- a/src/site/xhtml/installation.xhtml.vm +++ b/src/site/xhtml/installation.xhtml.vm @@ -10,7 +10,7 @@

Compatibility

 This will work with an ICAT version of 4.3.0 or greater and
-requires plugins implementing the IDS plugin interface 1.3.x.
+requires plugins implementing the IDS plugin interface 1.5.0.

Prerequisites
@@ -176,10 +176,17 @@ be deployed in the lib/applibs directory of your domain and must be packaged with all it dependencies. -
-writeDelaySeconds
-  The amount of time to wait before writing to archive storage.
-  This exists to allow enough time for all the datafiles to be added to
-  a dataset before it is zipped and written.
+delayDatasetWritesSeconds
+  The amount of time to wait before writing a dataset to archive storage.
+  This exists to allow enough time for all the datafiles to be added to
+  a dataset before it is zipped and written. This property is only
+  used if storageUnit is set to dataset, see below.
+
+delayDatafileOperationsSeconds
+  The amount of time to wait before processing any deferred operations for
+  datafiles. Operations are collected during this period of time and
+  processed at once in combined threads. This property is only
+  used if storageUnit is set to datafile, see below. A sample
+  configuration follows this entry.
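For illustration, a dataset-based two-level configuration might combine the new properties like this; the values are arbitrary examples, not recommendations:

```
# Two level storage with deferred operations grouped per dataset
storageUnit = dataset

# Wait before a dataset is zipped and written to archive storage,
# so that all of its datafiles can be added first
delayDatasetWritesSeconds = 60

# Only consulted when storageUnit = datafile
# delayDatafileOperationsSeconds = 60

# Deprecated spelling, still honoured as a fallback
# writeDelaySeconds = 60
```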
startArchivingLevel1024bytes
If the space used in main storage exceeds this then datasets diff --git a/src/site/xhtml/release-notes.xhtml b/src/site/xhtml/release-notes.xhtml index 00212d73479613a78047fd38c4716588e52d4c39..86066444abc9cf39a30ee4e59f6f27af8900c15a 100644 --- a/src/site/xhtml/release-notes.xhtml +++ b/src/site/xhtml/release-notes.xhtml @@ -6,6 +6,59 @@

IDS Server Release Notes

1.10.1

Bug fix release

  • Fix: the write call fails with DataNotOnlineException if an ARCHIVE
    request is pending. (Issue #101)
  • Documentation fixes.

1.10.0

Add file system locking in the storage plugin

  • Call mainStorage.lock(DsInfo, boolean) whenever a dataset is locked
    internally. This call has been added to MainStorageInterface in
    ids.plugin 1.5.0. The plugin may implement it to acquire a file system
    lock, which then allows safe concurrent access to main storage for
    other processes; a plugin-side sketch follows this list. (Issue #80)
  • Add the new configuration properties delayDatasetWritesSeconds and
    delayDatafileOperationsSeconds, replacing the now deprecated
    writeDelaySeconds. (Issue #94)
  • Improve error handling in DsRestorer in the case of a corrupt ZIP
    archive. (Issue #96)
  • Require ids.plugin 1.5.0.
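To make the locking change concrete: the server's LockManager now calls mainStorage.lock(dsInfo, shared) and holds the returned AutoCloseable until the internal lock is released. Below is a minimal sketch of how a file-system based plugin might honour that call, assuming a hypothetical getDirectory(DsInfo) helper that resolves the dataset's storage directory; a real plugin might instead use a non-blocking tryLock and throw AlreadyLockedException:

```java
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

import org.icatproject.ids.plugin.DsInfo;

public abstract class LockingMainStorageSketch {

    // Hypothetical helper: resolve the directory holding this dataset's files.
    protected abstract Path getDirectory(DsInfo dsInfo);

    // Sketch of MainStorageInterface.lock(DsInfo, boolean): take an OS level
    // lock on a marker file next to the dataset directory and hand it back
    // as an AutoCloseable for LockManager to release later.
    public AutoCloseable lock(DsInfo dsInfo, boolean shared) throws IOException {
        Path lockFile = getDirectory(dsInfo).resolveSibling(dsInfo.getDsId() + ".lock");
        FileChannel channel = FileChannel.open(lockFile, StandardOpenOption.CREATE,
                StandardOpenOption.READ, StandardOpenOption.WRITE);
        // Blocks until the lock is available; shared maps to a shared file lock.
        FileLock fileLock = channel.lock(0L, Long.MAX_VALUE, shared);
        return () -> {
            fileLock.release();
            channel.close();
        };
    }
}
```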
1.9.1

Bug fix release

  • Fix: an uncaught exception in DfRestorer could cause restoration to
    stall until the IDS is restarted. (Issue #87)
  • Fix, once again: JMS Session not closed if an exception is thrown
    (Issue #85). The previous fix from 1.9.0 was not thread safe.
  • Fix: single quote characters in datafile location values must be
    escaped when building JPQL queries. (Issue #92)

1.9.0

Add a new write API call and review the internal locking

  • Add a new write API call that triggers a write to archive storage;
    a minimal client sketch follows this list. (PR #67)
  • Review the internal locking mechanism, fixing various race
    conditions. (PR #59)
  • Ignore datafiles that do not have the location attribute set.
    (Issue #63)
  • Change the HTTP status returned on DataNotOnlineException to 503.
    (Issue #84)
  • Fix: JMS Session not closed if an exception is thrown. (Issue #85)
  • Add a configuration option "testHome" to be set in settings.xml.
    Files are created relative to this directory when running the tests.
    (Issue #75)
  • Tests are now independent of locale.
  • Glassfish 4.0 is no longer supported.
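For reference, the write call added in 1.9.0 is a plain form-encoded POST to the write resource, mirroring archive and restore. A minimal JDK-only client sketch follows; the service URL, session id and dataset ids are placeholder values:

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

public class WriteCallSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder values: point these at a real IDS and a valid ICAT session.
        String idsUrl = "https://ids.example.com/ids";
        String sessionId = "0d9d3706-80d4-4d29-9ff3-4d65d4308a24";

        // write accepts the same selection parameters as archive and restore:
        // investigationIds, datasetIds and datafileIds, all optional.
        String form = "sessionId=" + URLEncoder.encode(sessionId, "UTF-8")
                + "&datasetIds=" + URLEncoder.encode("101,102", "UTF-8");

        HttpURLConnection conn = (HttpURLConnection) new URL(idsUrl + "/write").openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        try (OutputStream os = conn.getOutputStream()) {
            os.write(form.getBytes("UTF-8"));
        }

        // A 2xx status means the deferred write has been queued; 503
        // (DataNotOnlineException) means the data is not online or is busy
        // and the call should be retried later.
        System.out.println("HTTP status: " + conn.getResponseCode());
    }
}
```

The integration tests exercise the same call through the testing client, for example testingClient.write(sessionId, selection, 204) in WriteTest.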
1.8.0

Bug fix release - with new style deployment

    diff --git a/src/test/install/.gitignore b/src/test/install/.gitignore index a76f0ccf02be042503610644591c2550736db82a..4765a9f8eb44efe9c85013c67e0da9393b2f1d1f 100644 --- a/src/test/install/.gitignore +++ b/src/test/install/.gitignore @@ -1,3 +1,4 @@ *.war setup* -run* \ No newline at end of file +run* +logback.xml diff --git a/src/test/java/org/icatproject/ids/TidierTest.java b/src/test/java/org/icatproject/ids/TidierTest.java index da3eabccd0fb0fa2b1f1bb9471a2debf2f761aa0..d6c98caa381242c6a674ddc75789185dfe23684e 100644 --- a/src/test/java/org/icatproject/ids/TidierTest.java +++ b/src/test/java/org/icatproject/ids/TidierTest.java @@ -2,6 +2,7 @@ package org.icatproject.ids; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import java.io.ByteArrayInputStream; import java.nio.file.Files; @@ -82,4 +83,22 @@ public class TidierTest { assertFalse(Files.exists(pf)); } + @Test + public void testAddStringConstraint() throws Exception { + StringBuilder sb1 = new StringBuilder(); + boolean andNeeded = Tidier.addStringConstraint(sb1, "df.location", "/path/to/normal/file", false); + assertEquals(" df.location = '/path/to/normal/file'", sb1.toString()); + + /* Fix error where a file path contains an apostrophe */ + StringBuilder sb2 = new StringBuilder(); + andNeeded = Tidier.addStringConstraint(sb2, "df.location", "/path/to/Person's Files/myscript.py", false); + assertEquals(" df.location = '/path/to/Person''s Files/myscript.py'", sb2.toString()); + } + + @Test + public void testAddNumericConstraint() throws Exception { + StringBuilder sb3 = new StringBuilder(); + boolean andNeeded = Tidier.addNumericConstraint(sb3, "df.id", 12345L, false); + assertEquals(" df.id = 12345", sb3.toString()); + } } diff --git a/src/test/java/org/icatproject/ids/integration/BaseTest.java b/src/test/java/org/icatproject/ids/integration/BaseTest.java index 6356bbfd7f7599265b94dbd0814eb22be29614fc..2841700bfec904f38aad3072f99ee6701feb0539 100644 --- a/src/test/java/org/icatproject/ids/integration/BaseTest.java +++ b/src/test/java/org/icatproject/ids/integration/BaseTest.java @@ -293,7 +293,7 @@ public class BaseTest { Datafile df4 = new Datafile(); df4.setName("df4_" + timestamp); - df4.setLocation(ds2Loc + UUID.randomUUID()); + df4.setLocation(ds2Loc + "Person's file"); df4.setDataset(ds2); writeToFile(df4, "df4 test content very compressible very compressible", key); @@ -369,7 +369,7 @@ public class BaseTest { assertTrue(found); } - private void writeToFile(Datafile df, String content, String key) + protected void writeToFile(Datafile df, String content, String key) throws IOException, IcatException_Exception, NoSuchAlgorithmException { Path path = setup.getStorageDir().resolve(df.getLocation()); Files.createDirectories(path.getParent()); @@ -485,7 +485,7 @@ public class BaseTest { } - static String getLocationFromDigest(Long id, String locationWithHash) { + protected String getLocationFromDigest(Long id, String locationWithHash) { int i = locationWithHash.lastIndexOf(' '); return locationWithHash.substring(0, i); } diff --git a/src/test/java/org/icatproject/ids/integration/one/BogusDatafileTest.java b/src/test/java/org/icatproject/ids/integration/one/BogusDatafileTest.java new file mode 100644 index 0000000000000000000000000000000000000000..34441d9df8643bfcf357874bdb4643a3c1858345 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/one/BogusDatafileTest.java @@ -0,0 +1,126 @@ +package 
org.icatproject.ids.integration.one; + +import java.io.InputStream; +import java.util.Collections; + +import static org.junit.Assert.assertEquals; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.NotFoundException; +import org.icatproject.ids.integration.util.client.TestingClient.Flag; +import org.icatproject.ids.integration.util.client.TestingClient.Status; + +/* + * Issue #63: Internal error is raised trying to restore a dataset + * with datafiles not uploaded to IDS + * + * ids.server gets confused if datafiles do not exist in the storage, + * e.g. they are created in ICAT without having location set. + * + * Desired behavior: such bogus datafiles should be ignored. + */ + +public class BogusDatafileTest extends BaseTest { + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("one.properties"); + icatsetup(); + } + + @Before + public void createBogusFiles() throws Exception { + long timestamp = System.currentTimeMillis(); + + Dataset ds1 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(0)); + Datafile dfb1 = new Datafile(); + dfb1.setName("dfbogus1_" + timestamp); + dfb1.setFileSize(42L); + dfb1.setDataset(ds1); + dfb1.setId(icatWS.create(sessionId, dfb1)); + + Dataset ds2 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(1)); + Datafile dfb2 = new Datafile(); + dfb2.setName("dfbogus2_" + timestamp); + dfb2.setFileSize(42L); + dfb2.setDataset(ds2); + dfb2.setId(icatWS.create(sessionId, dfb2)); + + Dataset ds3 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(2)); + Datafile dfb3 = new Datafile(); + dfb3.setName("dfbogus3_" + timestamp); + dfb3.setFileSize(42L); + dfb3.setDataset(ds3); + dfb3.setId(icatWS.create(sessionId, dfb3)); + + datafileIds.add(dfb1.getId()); + datafileIds.add(dfb2.getId()); + datafileIds.add(dfb3.getId()); + } + + @Test + public void getEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, Collections.emptyList(), 57, 0); + } + + } + + @Test + public void getSizeEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + assertEquals(0L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test + public void getNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, datafileIds.subList(0, 2), 57, 0); + } + + } + + @Test + public void getSizeNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + assertEquals(104L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test(expected = NotFoundException.class) + public void getBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, 404)) { + checkZipStream(stream, 
Collections.emptyList(), 57, 0); + } + + } + + @Test(expected = NotFoundException.class) + public void getSizeBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + testingClient.getSize(sessionId, selection, 404); + + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/one/FileCheckerTest.java b/src/test/java/org/icatproject/ids/integration/one/FileCheckerTest.java index 39dc40aea646fce19a9143cbe0fc4e779d94fcf8..31d0aa8458f845e81282a4dcf507d02c99b532e9 100644 --- a/src/test/java/org/icatproject/ids/integration/one/FileCheckerTest.java +++ b/src/test/java/org/icatproject/ids/integration/one/FileCheckerTest.java @@ -43,9 +43,8 @@ public class FileCheckerTest extends BaseTest { Long dfid = 0L; for (int i = 0; i < 3; i++) { - dfid = testingClient.put(sessionId, Files.newInputStream(newFileLocation), - "uploaded_file_" + i, datasetIds.get(0), supportedDatafileFormat.getId(), - "A rather splendid datafile", 201); + dfid = testingClient.put(sessionId, Files.newInputStream(newFileLocation), "uploaded_file_" + i, + datasetIds.get(0), supportedDatafileFormat.getId(), "A rather splendid datafile", 201); } Datafile df = (Datafile) icatWS.get(sessionId, "Datafile INCLUDE 1", dfid); @@ -82,21 +81,16 @@ public class FileCheckerTest extends BaseTest { Files.deleteIfExists(errorLog); checkHas("Datafile", dfid, "Zoo\" does not contain hash."); - df.setLocation(null); - icatWS.update(sessionId, df); - Files.deleteIfExists(errorLog); - checkHas("Datafile", dfid, "location null"); - } - private void checkHas(String type, Long id, String message) throws IOException, - InterruptedException { + private void checkHas(String type, Long id, String message) throws IOException, InterruptedException { Set lines = new HashSet(); while (!Files.exists(errorLog)) { Thread.sleep(10); } for (String line : Files.readAllLines(errorLog, Charset.defaultCharset())) { - lines.add(line.substring(22)); + int n = line.indexOf(": ") + 2; + lines.add(line.substring(n)); } assertEquals(1, lines.size()); String msg = new ArrayList(lines).get(0); diff --git a/src/test/java/org/icatproject/ids/integration/one/WriteTest.java b/src/test/java/org/icatproject/ids/integration/one/WriteTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4a20a34f86ecb1b7815098a56f692d88f0a5012e --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/one/WriteTest.java @@ -0,0 +1,26 @@ +package org.icatproject.ids.integration.one; + +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.junit.BeforeClass; +import org.junit.Test; + +public class WriteTest extends BaseTest { + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("one.properties"); + icatsetup(); + } + + /** + * For one level storage, the write call is basically a noop. + * Just verify that it does not throw an error. 
+ */ + @Test + public void writeDataset() throws Exception { + testingClient.write(sessionId, new DataSelection().addDataset(datasetIds.get(0)), 204); + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/two/BogusDatafileTest.java b/src/test/java/org/icatproject/ids/integration/two/BogusDatafileTest.java new file mode 100644 index 0000000000000000000000000000000000000000..6ee96c97da0e583964e29432979a167045debfc0 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/two/BogusDatafileTest.java @@ -0,0 +1,188 @@ +package org.icatproject.ids.integration.two; + +import java.io.File; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.NotFoundException; +import org.icatproject.ids.integration.util.client.TestingClient.Flag; +import org.icatproject.ids.integration.util.client.TestingClient.Status; + +/* + * Issue #63: Internal error is raised trying to restore a dataset + * with datafiles not uploaded to IDS + * + * ids.server gets confused if datafiles do not exist in the storage, + * e.g. they are created in ICAT without having location set. + * + * Desired behavior: such bogus datafiles should be ignored. 
+ */ + +public class BogusDatafileTest extends BaseTest { + + private static long timestamp = System.currentTimeMillis(); + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("two.properties"); + icatsetup(); + } + + @Before + public void createBogusFiles() throws Exception { + + Dataset ds1 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(0)); + Datafile dfb1 = new Datafile(); + dfb1.setName("dfbogus1_" + timestamp); + dfb1.setFileSize(42L); + dfb1.setDataset(ds1); + dfb1.setId(icatWS.create(sessionId, dfb1)); + + Dataset ds2 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(1)); + Datafile dfb2 = new Datafile(); + dfb2.setName("dfbogus2_" + timestamp); + dfb2.setFileSize(42L); + dfb2.setDataset(ds2); + dfb2.setId(icatWS.create(sessionId, dfb2)); + + Dataset ds3 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(2)); + Datafile dfb3 = new Datafile(); + dfb3.setName("dfbogus3_" + timestamp); + dfb3.setFileSize(42L); + dfb3.setDataset(ds3); + dfb3.setId(icatWS.create(sessionId, dfb3)); + + datafileIds.add(dfb1.getId()); + datafileIds.add(dfb2.getId()); + datafileIds.add(dfb3.getId()); + + } + + @Test + public void getEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, Collections.emptyList(), 57, 0); + } + + } + + @Test + public void getSizeEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + assertEquals(0L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test + public void getNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + + testingClient.restore(sessionId, selection, 204); + + waitForIds(); + assertEquals(Status.ONLINE, testingClient.getStatus(sessionId, selection, null)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, datafileIds.subList(0, 2), 57, 0); + } + + } + + @Test + public void getSizeNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + assertEquals(104L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test(expected = NotFoundException.class) + public void getBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + + testingClient.restore(sessionId, selection, 404); + + waitForIds(); + assertEquals(Status.ONLINE, testingClient.getStatus(sessionId, selection, 404)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, 404)) { + checkZipStream(stream, Collections.emptyList(), 57, 0); + } + + } + + @Test(expected = NotFoundException.class) + public void getSizeBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + testingClient.getSize(sessionId, selection, 404); + + } + + /* + * Try the full cycle: upload a new file into a dataset having a bogus file, which triggers a write of the + * dataset to archive storage, archive the dataset, and restore it. Each step must deal gracefully with + * the bogus file in the dataset. 
+ */ + @Test + public void putWriteArchiveRestore() throws Exception { + + Long dsId = datasetIds.get(0); + DataSelection selection = new DataSelection().addDataset(dsId); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + assertTrue(Files.exists(dirOnFastStorage)); + + Long dfulId = testingClient.put(sessionId, Files.newInputStream(newFileLocation), + "uploaded_file_" + timestamp, dsId, supportedDatafileFormat.getId(), + "A rather splendid datafile", 201); + Datafile dful = (Datafile) icatWS.get(sessionId, "Datafile", dfulId); + testingClient.archive(sessionId, selection, 204); + waitForIds(); + assertFalse(Files.exists(dirOnFastStorage)); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + assertTrue(Files.exists(dirOnFastStorage)); + + File[] filesList = dirOnFastStorage.toFile().listFiles(); + assertEquals(3, filesList.length); + Set locations = new HashSet<>(); + Datafile df1 = (Datafile) icatWS.get(sessionId, "Datafile", datafileIds.get(0)); + locations.add(getLocationFromDigest(df1.getId(), df1.getLocation())); + Datafile df2 = (Datafile) icatWS.get(sessionId, "Datafile", datafileIds.get(1)); + locations.add(getLocationFromDigest(df2.getId(), df2.getLocation())); + locations.add(getLocationFromDigest(dful.getId(), dful.getLocation())); + for (File file : filesList) { + String location = setup.getStorageDir().relativize(file.toPath()).toString(); + assertTrue(locations.contains(location)); + } + + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/two/DeleteTest.java b/src/test/java/org/icatproject/ids/integration/two/DeleteTest.java index 75a97a59fa211b7bcb0f7557a746914f6175a978..f380a820ac2fdd91c07f4ed7a7d4daf8b1f00313 100644 --- a/src/test/java/org/icatproject/ids/integration/two/DeleteTest.java +++ b/src/test/java/org/icatproject/ids/integration/two/DeleteTest.java @@ -19,7 +19,7 @@ public class DeleteTest extends BaseTest { @Test(expected = DataNotOnlineException.class) public void deleteFromUnrestoredDataset() throws Exception { - testingClient.delete(sessionId, new DataSelection().addDataset(datasetIds.get(0)), 404); + testingClient.delete(sessionId, new DataSelection().addDataset(datasetIds.get(0)), 503); } @Test diff --git a/src/test/java/org/icatproject/ids/integration/two/FileCheckerTest.java b/src/test/java/org/icatproject/ids/integration/two/FileCheckerTest.java index ceaaceb8ae1a5a5bad39b35ca8af0c305dc4d106..d2713fd74a68d5bafaeda756691bf9be52ee2c21 100644 --- a/src/test/java/org/icatproject/ids/integration/two/FileCheckerTest.java +++ b/src/test/java/org/icatproject/ids/integration/two/FileCheckerTest.java @@ -196,7 +196,8 @@ public class FileCheckerTest extends BaseTest { Thread.sleep(10); } for (String line : Files.readAllLines(errorLog, Charset.defaultCharset())) { - lines.add(line.substring(22)); + int n = line.indexOf(": ") + 2; + lines.add(line.substring(n)); } assertEquals(1, lines.size()); String msg = new ArrayList(lines).get(0); diff --git a/src/test/java/org/icatproject/ids/integration/two/GetDataExplicitTest.java b/src/test/java/org/icatproject/ids/integration/two/GetDataExplicitTest.java index 0b814d130982f3689d8a02c4a77c6a8185b77cbf..178ea61a44d9edda320fc994c6d887fd98a43e54 100644 --- a/src/test/java/org/icatproject/ids/integration/two/GetDataExplicitTest.java +++ b/src/test/java/org/icatproject/ids/integration/two/GetDataExplicitTest.java @@ -64,7 +64,7 @@ public class GetDataExplicitTest extends BaseTest { public void correctBehaviourTest() throws 
Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDatafiles(datafileIds), Flag.NONE, - 0, 404)) { + 0, 503)) { fail("Should have thrown exception"); } catch (IdsException e) { assertEquals(DataNotOnlineException.class, e.getClass()); @@ -123,7 +123,7 @@ public class GetDataExplicitTest extends BaseTest { public void gettingDatafileAndDatasetShouldRestoreBothDatasetsTest() throws Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDatafile(datafileIds.get(2)) - .addDataset(datasetIds.get(0)), Flag.NONE, 0, 404)) { + .addDataset(datasetIds.get(0)), Flag.NONE, 0, 503)) { fail("Should throw exception"); } catch (DataNotOnlineException e) { // All is well diff --git a/src/test/java/org/icatproject/ids/integration/two/LinkTest.java b/src/test/java/org/icatproject/ids/integration/two/LinkTest.java index 17356794d72825458ae8aa37b82e29654fb16d61..0d130be6781716f1976ee0abeaeafb41f7669c42 100644 --- a/src/test/java/org/icatproject/ids/integration/two/LinkTest.java +++ b/src/test/java/org/icatproject/ids/integration/two/LinkTest.java @@ -26,7 +26,7 @@ public class LinkTest extends BaseTest { String username = System.getProperty("user.name"); try { - testingClient.getLink(sessionId, datafileIds.get(0), username, 404); + testingClient.getLink(sessionId, datafileIds.get(0), username, 503); fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well diff --git a/src/test/java/org/icatproject/ids/integration/two/PutTest.java b/src/test/java/org/icatproject/ids/integration/two/PutTest.java index 9928ecb576eb34ebfa8278d5335775a558474052..61c63a62c9187cf06dcfc93d8103c2f51db36281 100644 --- a/src/test/java/org/icatproject/ids/integration/two/PutTest.java +++ b/src/test/java/org/icatproject/ids/integration/two/PutTest.java @@ -34,7 +34,7 @@ public class PutTest extends BaseTest { public void putToUnrestoredDataset() throws Exception { testingClient.put(sessionId, Files.newInputStream(newFileLocation), "uploaded_file1_" - + timestamp, datasetIds.get(0), supportedDatafileFormat.getId(), null, 404); + + timestamp, datasetIds.get(0), supportedDatafileFormat.getId(), null, 503); } @Test diff --git a/src/test/java/org/icatproject/ids/integration/two/RestoreErrorsTest.java b/src/test/java/org/icatproject/ids/integration/two/RestoreErrorsTest.java new file mode 100644 index 0000000000000000000000000000000000000000..c894363563474479484abb524cdd3f8d2f7432e0 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/two/RestoreErrorsTest.java @@ -0,0 +1,146 @@ +package org.icatproject.ids.integration.two; + +/* + * Test various error conditions in the DsRestorer caused by ZIP files + * in archive storage having unexpected content. 
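+ *
+ * For reference, java.util.zip refuses to write two entries with the
+ * same name; a minimal sketch (hypothetical names, not part of the
+ * tests below):
+ *
+ *     zipout.putNextEntry(new ZipEntry("ids/some_name"));
+ *     zipout.closeEntry();
+ *     zipout.putNextEntry(new ZipEntry("ids/some_name"));
+ *     // throws java.util.zip.ZipException: duplicate entry
+ *
+ * This is why cloneZip() below cannot produce a DUPLICATE_ENTRY archive.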
+ */ + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; + +import org.junit.BeforeClass; +import org.junit.Test; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.Facility; +import org.icatproject.IcatException_Exception; +import org.icatproject.Investigation; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.InternalException; +import org.icatproject.ids.integration.util.client.TestingClient.Status; + +public class RestoreErrorsTest extends BaseTest { + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("two.properties"); + icatsetup(); + } + + /* + * Note that we cannot test for DUPLICATE_ENTRY here, because ZipOutputStream() won't allow + * us to create such a defective ZIP file. But that doesn't mean that this error cannot + * occur. + */ + private enum Defect { + NONE, MISSING_ENTRY, SPURIOUS_ENTRY, DUPLICATE_ENTRY + } + + private void cloneZip(Path archivepath, Defect defect) throws IOException { + Path savepath = archivepath.getParent().resolve(".sav"); + Files.move(archivepath, savepath); + try (ZipOutputStream zipout = new ZipOutputStream(Files.newOutputStream(archivepath))) { + try (ZipInputStream zipin = new ZipInputStream(Files.newInputStream(savepath))) { + ZipEntry entry = zipin.getNextEntry(); + boolean first = true; + String entryName = ""; + while (entry != null) { + if (first && defect == Defect.MISSING_ENTRY) { + entry = zipin.getNextEntry(); + } + first = false; + entryName = entry.getName(); + zipout.putNextEntry(new ZipEntry(entryName)); + byte[] bytes = new byte[8192]; + int length; + while ((length = zipin.read(bytes)) >= 0) { + zipout.write(bytes, 0, length); + } + zipout.closeEntry(); + entry = zipin.getNextEntry(); + } + } + if (defect == Defect.SPURIOUS_ENTRY) { + zipout.putNextEntry(new ZipEntry("ids/spurious_entry")); + byte[] bytes = new byte[64]; + zipout.write(bytes, 0, 64); + zipout.closeEntry(); + } + } + } + + /* + * As a reference: a restore with no errors. + */ + @Test + public void restoreOk() throws Exception { + Long dsId = datasetIds.get(1); + Path archivefile = getFileOnArchiveStorage(dsId); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + cloneZip(archivefile, Defect.NONE); + testingClient.restore(sessionId, selection, 204); + waitForIds(); + checkPresent(dirOnFastStorage); + } + + /* + * A missing entry in the archive. 
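+ * cloneZip() with Defect.MISSING_ENTRY drops the first entry of the
+ * original ZIP, so one datafile is absent from the archive and the
+ * restore is expected to fail.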
+ */ + @Test + public void restoreMissing() throws Exception { + Long dsId = datasetIds.get(1); + Path archivefile = getFileOnArchiveStorage(dsId); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + cloneZip(archivefile, Defect.MISSING_ENTRY); + testingClient.restore(sessionId, selection, 204); + waitForIds(); + checkAbsent(dirOnFastStorage); + try { + testingClient.getStatus(sessionId, selection, null); + fail("Expected InternalException to be thrown."); + } catch (InternalException e) { + assertEquals("Restore failed", e.getMessage()); + } + testingClient.reset(sessionId, selection, 204); + Status status = testingClient.getStatus(sessionId, selection, 200); + assertEquals(Status.ARCHIVED, status); + } + + /* + * A spurious entry in the archive. + */ + @Test + public void restoreSpurious() throws Exception { + Long dsId = datasetIds.get(1); + Path archivefile = getFileOnArchiveStorage(dsId); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + cloneZip(archivefile, Defect.SPURIOUS_ENTRY); + testingClient.restore(sessionId, selection, 204); + waitForIds(); + checkAbsent(dirOnFastStorage); + try { + testingClient.getStatus(sessionId, selection, null); + fail("Expected InternalException to be thrown."); + } catch (InternalException e) { + assertEquals("Restore failed", e.getMessage()); + } + testingClient.reset(sessionId, selection, 204); + Status status = testingClient.getStatus(sessionId, selection, 200); + assertEquals(Status.ARCHIVED, status); + } +} diff --git a/src/test/java/org/icatproject/ids/integration/two/WriteTest.java b/src/test/java/org/icatproject/ids/integration/two/WriteTest.java new file mode 100644 index 0000000000000000000000000000000000000000..ffed6a82ade9e55c73b9bfc379cb1a97ec3693c0 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/two/WriteTest.java @@ -0,0 +1,124 @@ +package org.icatproject.ids.integration.two; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.UUID; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.DatasetType; +import org.icatproject.Investigation; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.BadRequestException; +import org.icatproject.ids.integration.util.client.DataNotOnlineException; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.InsufficientPrivilegesException; +import org.junit.BeforeClass; +import org.junit.Test; + +public class WriteTest extends BaseTest { + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("two.properties"); + icatsetup(); + } + + /** + * In principle, it's always possible to do a write call on + * existing datasets, but it will have no visible effect. 
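+ * After a restore, the dataset is already present in archive storage,
+ * so writing it back should merely recreate the same ZIP content.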
+ */ + @Test + public void restoreThenWriteDataset() throws Exception { + + Long dsId = datasetIds.get(0); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + checkPresent(dirOnFastStorage); + + testingClient.write(sessionId, selection, 204); + + } + + /** + * Create a dataset in ICAT, store the files in main storage, + * then do a write call to IDS to get the dataset written to + * archive storage. + */ + @Test + public void storeThenWrite() throws Exception { + + long timestamp = System.currentTimeMillis(); + + Investigation inv = (Investigation) icatWS.get(sessionId, "Investigation INCLUDE Facility", investigationId); + String invLoc = inv.getId() + "/"; + DatasetType dsType = (DatasetType) icatWS.search(sessionId, "DatasetType").get(0); + + Dataset ds = new Dataset(); + ds.setName("dsWrite_" + timestamp); + ds.setComplete(false); + ds.setType(dsType); + ds.setInvestigation(inv); + ds.setId(icatWS.create(sessionId, ds)); + String dsLoc = invLoc + ds.getId() + "/"; + + Datafile df = new Datafile(); + df.setName("dfWrite_" + timestamp); + df.setLocation(dsLoc + UUID.randomUUID()); + df.setDataset(ds); + writeToFile(df, "some really boring datafile test content", setup.getKey()); + + Long dsId = ds.getId(); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + Path fileOnArchiveStorage = getFileOnArchiveStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + + checkPresent(dirOnFastStorage); + + testingClient.write(sessionId, selection, 204); + waitForIds(); + + ArrayList<Long> list = new ArrayList<Long>(); + list.add(df.getId()); + checkZipFile(fileOnArchiveStorage, list, 42); + + } + + /** + * Write fails if the dataset is not online.
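+ * The server is expected to reply with status code 503, which the
+ * testing client maps to DataNotOnlineException; see the new
+ * TestingClient.write() further down in this patch, which POSTs the
+ * sessionId and the selection's parameters as a URL-encoded form to
+ * the write endpoint.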
+ */ + @Test(expected = DataNotOnlineException.class) + public void notOnlineTest() throws Exception { + Long dsId = datasetIds.get(0); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + testingClient.archive(sessionId, selection, 204); + waitForIds(); + checkAbsent(dirOnFastStorage); + testingClient.write(sessionId, selection, 503); + } + + @Test(expected = BadRequestException.class) + public void badSessionIdFormatTest() throws Exception { + testingClient.write("bad sessionId format", + new DataSelection().addDatafiles(Arrays.asList(1L, 2L)), 400); + } + + @Test + public void noIdsTest() throws Exception { + testingClient.write(sessionId, new DataSelection(), 204); + } + + @Test(expected = InsufficientPrivilegesException.class) + public void nonExistingSessionIdTest() throws Exception { + testingClient.write("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + new DataSelection().addDatafiles(Arrays.asList(1L, 2L)), 403); + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/twodf/BogusDatafileTest.java b/src/test/java/org/icatproject/ids/integration/twodf/BogusDatafileTest.java new file mode 100644 index 0000000000000000000000000000000000000000..69e5303da82f04ebf4af722355e0872381361662 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/twodf/BogusDatafileTest.java @@ -0,0 +1,188 @@ +package org.icatproject.ids.integration.twodf; + +import java.io.File; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.NotFoundException; +import org.icatproject.ids.integration.util.client.TestingClient.Flag; +import org.icatproject.ids.integration.util.client.TestingClient.Status; + +/* + * Issue #63: Internal error is raised trying to restore a dataset + * with datafiles not uploaded to IDS + * + * ids.server gets confused if datafiles do not exist in the storage, + * e.g. they are created in ICAT without having location set. + * + * Desired behavior: such bogus datafiles should be ignored. 
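+ *
+ * A bogus datafile in this sense is one registered in ICAT without a
+ * location, roughly (this is what createBogusFiles() below does):
+ *
+ *     Datafile dfb = new Datafile();
+ *     dfb.setName("dfbogus_" + timestamp);
+ *     dfb.setFileSize(42L);
+ *     dfb.setDataset(ds);                    // note: no setLocation()
+ *     dfb.setId(icatWS.create(sessionId, dfb));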
+ */ + +public class BogusDatafileTest extends BaseTest { + + private static long timestamp = System.currentTimeMillis(); + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("twodf.properties"); + icatsetup(); + } + + @Before + public void createBogusFiles() throws Exception { + + Dataset ds1 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(0)); + Datafile dfb1 = new Datafile(); + dfb1.setName("dfbogus1_" + timestamp); + dfb1.setFileSize(42L); + dfb1.setDataset(ds1); + dfb1.setId(icatWS.create(sessionId, dfb1)); + + Dataset ds2 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(1)); + Datafile dfb2 = new Datafile(); + dfb2.setName("dfbogus2_" + timestamp); + dfb2.setFileSize(42L); + dfb2.setDataset(ds2); + dfb2.setId(icatWS.create(sessionId, dfb2)); + + Dataset ds3 = (Dataset) icatWS.get(sessionId, "Dataset", datasetIds.get(2)); + Datafile dfb3 = new Datafile(); + dfb3.setName("dfbogus3_" + timestamp); + dfb3.setFileSize(42L); + dfb3.setDataset(ds3); + dfb3.setId(icatWS.create(sessionId, dfb3)); + + datafileIds.add(dfb1.getId()); + datafileIds.add(dfb2.getId()); + datafileIds.add(dfb3.getId()); + + } + + @Test + public void getEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, Collections.emptyList(), 57, 0); + } + + } + + @Test + public void getSizeEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(2)); + assertEquals(0L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test + public void getNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + + testingClient.restore(sessionId, selection, 204); + + waitForIds(); + assertEquals(Status.ONLINE, testingClient.getStatus(sessionId, selection, null)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, null)) { + checkZipStream(stream, datafileIds.subList(0, 2), 57, 0); + } + + } + + @Test + public void getSizeNonEmptyDataset() throws Exception { + + DataSelection selection = new DataSelection().addDataset(datasetIds.get(0)); + assertEquals(104L, testingClient.getSize(sessionId, selection, 200)); + + } + + @Test(expected = NotFoundException.class) + public void getBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + + testingClient.restore(sessionId, selection, 404); + + waitForIds(); + assertEquals(Status.ONLINE, testingClient.getStatus(sessionId, selection, 404)); + + try (InputStream stream = testingClient.getData(sessionId, selection, Flag.NONE, 0, 404)) { + checkZipStream(stream, Collections.emptyList(), 57, 0); + } + + } + + @Test(expected = NotFoundException.class) + public void getSizeBogusFile() throws Exception { + + DataSelection selection = new DataSelection().addDatafile(datafileIds.get(5)); + testingClient.getSize(sessionId, selection, 404); + + } + + /* + * Try the full cycle: upload a new file into a dataset having a bogus file, which triggers a write of the + * datafile to archive storage, archive the dataset, and restore it. Each step must deal gracefully with + * the bogus file in the dataset. 
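+ * The bogus datafile has no content on storage, so it must not show up
+ * in the restored directory; hence exactly three files are expected:
+ * the two genuine datafiles plus the newly uploaded one.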
+ */ + @Test + public void putWriteArchiveRestore() throws Exception { + + Long dsId = datasetIds.get(0); + DataSelection selection = new DataSelection().addDataset(dsId); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + assertTrue(Files.exists(dirOnFastStorage)); + + Long dfulId = testingClient.put(sessionId, Files.newInputStream(newFileLocation), + "uploaded_file_" + timestamp, dsId, supportedDatafileFormat.getId(), + "A rather splendid datafile", 201); + Datafile dful = (Datafile) icatWS.get(sessionId, "Datafile", dfulId); + testingClient.archive(sessionId, selection, 204); + waitForIds(); + assertFalse(Files.exists(dirOnFastStorage)); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + assertTrue(Files.exists(dirOnFastStorage)); + + File[] filesList = dirOnFastStorage.toFile().listFiles(); + assertEquals(3, filesList.length); + Set<String> locations = new HashSet<>(); + Datafile df1 = (Datafile) icatWS.get(sessionId, "Datafile", datafileIds.get(0)); + locations.add(getLocationFromDigest(df1.getId(), df1.getLocation())); + Datafile df2 = (Datafile) icatWS.get(sessionId, "Datafile", datafileIds.get(1)); + locations.add(getLocationFromDigest(df2.getId(), df2.getLocation())); + locations.add(getLocationFromDigest(dful.getId(), dful.getLocation())); + for (File file : filesList) { + String location = setup.getStorageDir().relativize(file.toPath()).toString(); + assertTrue(locations.contains(location)); + } + + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/twodf/FileCheckerTest.java b/src/test/java/org/icatproject/ids/integration/twodf/FileCheckerTest.java index 0bab66269f3347f756213883a103f98a978bfd74..6c1a63ec360183ced227f7513a41739fa8046414 100644 --- a/src/test/java/org/icatproject/ids/integration/twodf/FileCheckerTest.java +++ b/src/test/java/org/icatproject/ids/integration/twodf/FileCheckerTest.java @@ -43,9 +43,8 @@ public class FileCheckerTest extends BaseTest { Long dfid = 0L; for (int i = 0; i < 3; i++) { - dfid = testingClient.put(sessionId, Files.newInputStream(newFileLocation), - "uploaded_file_" + i, datasetIds.get(0), supportedDatafileFormat.getId(), - "A rather splendid datafile", 201); + dfid = testingClient.put(sessionId, Files.newInputStream(newFileLocation), "uploaded_file_" + i, + datasetIds.get(0), supportedDatafileFormat.getId(), "A rather splendid datafile", 201); } Datafile df = (Datafile) icatWS.get(sessionId, "Datafile INCLUDE 1", dfid); @@ -83,14 +82,14 @@ public class FileCheckerTest extends BaseTest { checkHas("Dataset", datasetIds.get(0), "/" + datasetIds.get(0)); } - private void checkHas(String type, Long id, String message) throws IOException, - InterruptedException { + private void checkHas(String type, Long id, String message) throws IOException, InterruptedException { Set<String> lines = new HashSet<String>(); while (!Files.exists(errorLog)) { Thread.sleep(10); } for (String line : Files.readAllLines(errorLog, Charset.defaultCharset())) { - lines.add(line.substring(22)); + int n = line.indexOf(": ") + 2; + lines.add(line.substring(n)); } assertEquals(1, lines.size()); String msg = new ArrayList<String>(lines).get(0); diff --git a/src/test/java/org/icatproject/ids/integration/twodf/GetDataExplicitTest.java b/src/test/java/org/icatproject/ids/integration/twodf/GetDataExplicitTest.java index 9ef8370e43a964b57e135de527ab68469034049f..de43b6f6df9fe225acc16236f52d5ae9e706e461 100644 --- a/src/test/java/org/icatproject/ids/integration/twodf/GetDataExplicitTest.java +++
b/src/test/java/org/icatproject/ids/integration/twodf/GetDataExplicitTest.java @@ -64,7 +64,7 @@ public class GetDataExplicitTest extends BaseTest { public void correctBehaviourTest() throws Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDatafiles(datafileIds), Flag.NONE, - 0, 404)) { + 0, 503)) { fail("Should have thrown exception"); } catch (IdsException e) { @@ -87,7 +87,7 @@ public class GetDataExplicitTest extends BaseTest { public void gettingDatafileDoesNotRestoreItsDatasetTest() throws Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDatafile(datafileIds.get(2)), - Flag.NONE, 0, 404)) { + Flag.NONE, 0, 503)) { fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well @@ -95,7 +95,7 @@ public class GetDataExplicitTest extends BaseTest { waitForIds(); try (InputStream stream = testingClient.getData(sessionId, new DataSelection().addDatafile(datafileIds.get(3)), - Flag.NONE, 0, 404)) { + Flag.NONE, 0, 503)) { fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well @@ -107,7 +107,7 @@ public class GetDataExplicitTest extends BaseTest { public void gettingDatasetUsesCacheTest() throws Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDataset(datasetIds.get(0)), - Flag.NONE, 0, 404)) { + Flag.NONE, 0, 503)) { fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well @@ -126,7 +126,7 @@ public class GetDataExplicitTest extends BaseTest { public void gettingDatafileAndDatasetShouldNotRestoreBothDatasetsTest() throws Exception { try (InputStream z = testingClient.getData(sessionId, new DataSelection().addDatafile(datafileIds.get(2)) - .addDataset(datasetIds.get(0)), Flag.NONE, 0, 404)) { + .addDataset(datasetIds.get(0)), Flag.NONE, 0, 503)) { fail("Should throw exception"); } catch (DataNotOnlineException e) { // All is well @@ -134,7 +134,7 @@ public class GetDataExplicitTest extends BaseTest { waitForIds(); try (InputStream stream = testingClient.getData(sessionId, new DataSelection().addDatasets(datasetIds), - Flag.NONE, 0, 404)) { + Flag.NONE, 0, 503)) { fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well diff --git a/src/test/java/org/icatproject/ids/integration/twodf/LinkTest.java b/src/test/java/org/icatproject/ids/integration/twodf/LinkTest.java index 4986652095a0d8dc644ba58a5702d7e3dca89212..53653c4a01cb56c081015b2fee3039f2d0d0bf46 100644 --- a/src/test/java/org/icatproject/ids/integration/twodf/LinkTest.java +++ b/src/test/java/org/icatproject/ids/integration/twodf/LinkTest.java @@ -26,7 +26,7 @@ public class LinkTest extends BaseTest { String username = System.getProperty("user.name"); try { - testingClient.getLink(sessionId, datafileIds.get(0), username, 404); + testingClient.getLink(sessionId, datafileIds.get(0), username, 503); fail("Should have thrown an exception"); } catch (DataNotOnlineException e) { // All is well diff --git a/src/test/java/org/icatproject/ids/integration/twodf/QueueTest.java b/src/test/java/org/icatproject/ids/integration/twodf/QueueTest.java new file mode 100644 index 0000000000000000000000000000000000000000..bab9f241b0e7297478125d885cd2439b74fcdb3e --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/twodf/QueueTest.java @@ -0,0 +1,47 @@ +package org.icatproject.ids.integration.twodf; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + 
+import static org.junit.Assert.assertTrue; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.DataSelection; + +public class QueueTest extends BaseTest { + + private static long timestamp = System.currentTimeMillis(); + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("twodf.properties"); + icatsetup(); + } + + /* + * Arrange for multiple different operations requiring conflicting + * locks on the same dataset to be processed at the same time. This + * triggers Bug #82. + */ + @Test + public void multiOperationTest() throws Exception { + Long dsId = datasetIds.get(0); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + assertTrue(Files.exists(dirOnFastStorage)); + + Long dfid = testingClient.put(sessionId, Files.newInputStream(newFileLocation), + "uploaded_file_" + timestamp, dsId, supportedDatafileFormat.getId(), + "A rather splendid datafile", 201); + testingClient.archive(sessionId, selection, 204); + waitForIds(); + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/twodf/WriteTest.java b/src/test/java/org/icatproject/ids/integration/twodf/WriteTest.java new file mode 100644 index 0000000000000000000000000000000000000000..7ce627c2f0fc9602a9e26cf47f8237b6f6123a56 --- /dev/null +++ b/src/test/java/org/icatproject/ids/integration/twodf/WriteTest.java @@ -0,0 +1,121 @@ +package org.icatproject.ids.integration.twodf; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.UUID; + +import org.icatproject.Datafile; +import org.icatproject.Dataset; +import org.icatproject.DatasetType; +import org.icatproject.Investigation; +import org.icatproject.ids.integration.BaseTest; +import org.icatproject.ids.integration.util.Setup; +import org.icatproject.ids.integration.util.client.BadRequestException; +import org.icatproject.ids.integration.util.client.DataNotOnlineException; +import org.icatproject.ids.integration.util.client.DataSelection; +import org.icatproject.ids.integration.util.client.InsufficientPrivilegesException; +import org.junit.BeforeClass; +import org.junit.Test; + +public class WriteTest extends BaseTest { + + @BeforeClass + public static void setup() throws Exception { + setup = new Setup("twodf.properties"); + icatsetup(); + } + + /** + * In principle, it's always possible to do a write call on + * existing datasets, but it will have no visible effect. + */ + @Test + public void restoreThenWriteDataset() throws Exception { + + Long dsId = datasetIds.get(0); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + + testingClient.restore(sessionId, selection, 204); + waitForIds(); + checkPresent(dirOnFastStorage); + + testingClient.write(sessionId, selection, 204); + + } + + /** + * Create a dataset in ICAT, store the files in main storage, + * then do a write call to IDS to get the dataset written to + * archive storage.
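+ * With storageUnit = datafile (see twodf.properties in this patch),
+ * the write presumably proceeds per datafile after
+ * delayDatafileOperationsSeconds, hence the waitForIds() before
+ * checking archive storage.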
+ */ + @Test + public void storeThenWrite() throws Exception { + + long timestamp = System.currentTimeMillis(); + + Investigation inv = (Investigation) icatWS.get(sessionId, "Investigation INCLUDE Facility", investigationId); + String invLoc = inv.getId() + "/"; + DatasetType dsType = (DatasetType) icatWS.search(sessionId, "DatasetType").get(0); + + Dataset ds = new Dataset(); + ds.setName("dsWrite_" + timestamp); + ds.setComplete(false); + ds.setType(dsType); + ds.setInvestigation(inv); + ds.setId(icatWS.create(sessionId, ds)); + String dsLoc = invLoc + ds.getId() + "/"; + + Datafile df = new Datafile(); + df.setName("dfWrite_" + timestamp); + df.setLocation(dsLoc + UUID.randomUUID()); + df.setDataset(ds); + writeToFile(df, "some really boring datafile test content", setup.getKey()); + + Long dsId = ds.getId(); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + Path dirOnArchiveStorage = getFileOnArchiveStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + + checkPresent(dirOnFastStorage); + + testingClient.write(sessionId, selection, 204); + waitForIds(); + checkPresent(dirOnArchiveStorage); + + } + + /** + * Write fails if the dataset is not online. + */ + @Test(expected = DataNotOnlineException.class) + public void notOnlineTest() throws Exception { + Long dsId = datasetIds.get(0); + Path dirOnFastStorage = getDirOnFastStorage(dsId); + DataSelection selection = new DataSelection().addDataset(dsId); + testingClient.archive(sessionId, selection, 204); + waitForIds(); + checkAbsent(dirOnFastStorage); + testingClient.write(sessionId, selection, 503); + } + + @Test(expected = BadRequestException.class) + public void badSessionIdFormatTest() throws Exception { + testingClient.write("bad sessionId format", + new DataSelection().addDatafiles(Arrays.asList(1L, 2L)), 400); + } + + @Test + public void noIdsTest() throws Exception { + testingClient.write(sessionId, new DataSelection(), 204); + } + + @Test(expected = InsufficientPrivilegesException.class) + public void nonExistingSessionIdTest() throws Exception { + testingClient.write("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + new DataSelection().addDatafiles(Arrays.asList(1L, 2L)), 403); + } + +} diff --git a/src/test/java/org/icatproject/ids/integration/util/Setup.java b/src/test/java/org/icatproject/ids/integration/util/Setup.java index eae576382ce0e02efebbcb591d979a71b417fa0c..d915e715f38ef980a737ffa8b58e7be25b6845aa 100644 --- a/src/test/java/org/icatproject/ids/integration/util/Setup.java +++ b/src/test/java/org/icatproject/ids/integration/util/Setup.java @@ -33,6 +33,7 @@ public class Setup { private String goodSessionId = null; private String forbiddenSessionId = null; + private Path home; private Path storageArchiveDir; private Path storageDir; private Path preparedCacheDir; @@ -57,6 +58,13 @@ public class Setup { public Setup(String runPropertyFile) throws Exception { + // Test home directory + String testHome = System.getProperty("testHome"); + if (testHome == null) { + testHome = System.getProperty("user.home"); + } + home = Paths.get(testHome); + // Start by reading the test properties Properties testProps = new Properties(); InputStream is = Setup.class.getClassLoader().getResourceAsStream("test.properties"); @@ -72,15 +80,15 @@ public class Setup { idsUrl = new URL(serverUrl + "/ids"); - String home = System.getProperty("containerHome"); - if (home == null) { + String containerHome = System.getProperty("containerHome"); + if (containerHome == null) { System.err.println("containerHome is not defined as 
a system property"); + } long time = System.currentTimeMillis(); ShellCommand sc = new ShellCommand("src/test/scripts/prepare_test.py", "src/test/resources/" + runPropertyFile, - home, serverUrl); + home.toString(), containerHome, serverUrl); System.out.println(sc.getStdout() + " " + sc.getStderr()); System.out.println( "Setting up " + runPropertyFile + " took " + (System.currentTimeMillis() - time) / 1000. + "seconds"); @@ -91,7 +99,7 @@ if (runProperties.has("key")) { key = runProperties.getString("key"); } - updownDir = new File(new File(System.getProperty("user.home")), testProps.getProperty("updownDir")).toPath(); + updownDir = home.resolve(testProps.getProperty("updownDir")); icatUrl = runProperties.getURL("icat.url"); goodSessionId = login(testProps.getProperty("authorizedIcatUsername"), testProps.getProperty("authorizedIcatPassword")); @@ -120,7 +128,7 @@ } public void setReliability(double d) throws IOException { - Path p = Paths.get(System.getProperty("user.home"), "reliability"); + Path p = home.resolve("reliability"); try (BufferedWriter out = Files.newBufferedWriter(p)) { out.write(d + "\n"); } diff --git a/src/test/java/org/icatproject/ids/integration/util/client/TestingClient.java b/src/test/java/org/icatproject/ids/integration/util/client/TestingClient.java index 1e804bc8dceaea292a1af7e9141f87e565ce84c5..8b7ec6677b4f6a5ee165c828e0d360c011f955c8 100644 --- a/src/test/java/org/icatproject/ids/integration/util/client/TestingClient.java +++ b/src/test/java/org/icatproject/ids/integration/util/client/TestingClient.java @@ -413,8 +413,8 @@ public class TestingClient { serviceStatus.storeOpItems(dsInfo, request); } serviceStatus.setLockedCount(rootNode.getInt("lockCount")); - for (JsonValue num : rootNode.getJsonArray("lockedIds")) { - Long dsId = ((JsonNumber) num).longValueExact(); + for (JsonValue lock : rootNode.getJsonArray("locks")) { + Long dsId = ((JsonObject) lock).getJsonNumber("id").longValueExact(); serviceStatus.storeLockedDs(dsId); } for (JsonValue num : rootNode.getJsonArray("failures")) { @@ -864,4 +864,27 @@ } } + public void write(String sessionId, DataSelection data, Integer sc) throws NotImplementedException, + BadRequestException, InsufficientPrivilegesException, InternalException, NotFoundException, + DataNotOnlineException { + + URI uri = getUri(getUriBuilder("write")); + List<NameValuePair> formparams = new ArrayList<>(); + formparams.add(new BasicNameValuePair("sessionId", sessionId)); + for (Entry<String, String> entry : data.getParameters().entrySet()) { + formparams.add(new BasicNameValuePair(entry.getKey(), entry.getValue())); + } + try (CloseableHttpClient httpclient = HttpClients.createDefault()) { + HttpPost httpPost = new HttpPost(uri); + httpPost.setEntity(new UrlEncodedFormEntity(formparams)); + try (CloseableHttpResponse response = httpclient.execute(httpPost)) { + expectNothing(response, sc); + } catch (InsufficientStorageException e) { + throw new InternalException(e.getClass() + " " + e.getMessage()); + } + } catch (IOException e) { + throw new InternalException(e.getClass() + " " + e.getMessage()); + } + } + } \ No newline at end of file diff --git a/src/test/resources/one.properties b/src/test/resources/one.properties index 01313315db51fa8ad8eccda51cacad94b9d60a66..0ee27b2fe5ddb107c51e54baa082729d64d35fc8 100644 --- a/src/test/resources/one.properties +++ b/src/test/resources/one.properties @@ -1,9 +1,9 @@ plugin.zipMapper.class = org.icatproject.ids.storage_test.ZipMapper plugin.main.class =
org.icatproject.ids.storage_test.MainFileStorage -plugin.main.dir = ${HOME}/ids/main/ +plugin.main.dir = ${HOME}/data/ids/main/ -cache.dir = ${HOME}/ids/cache/ +cache.dir = ${HOME}/data/ids/cache/ preparedCount = 100 processQueueIntervalSeconds = 2 @@ -15,8 +15,8 @@ maxIdsInQuery = 1000 filesCheck.parallelCount = 2 filesCheck.gapSeconds = 3 -filesCheck.lastIdFile = ${HOME}/ids/lastIdFile -filesCheck.errorLog = ${HOME}/ids/errorLog +filesCheck.lastIdFile = ${HOME}/data/ids/lastIdFile +filesCheck.errorLog = ${HOME}/data/ids/errorLog linkLifetimeSeconds = 3600 diff --git a/src/test/resources/test.properties b/src/test/resources/test.properties index 6f5977ee3f5c0c10112cb90bc3f6ebb4a112047f..cf9fc9ebe29b230323217ba74a110fabad453a66 100644 --- a/src/test/resources/test.properties +++ b/src/test/resources/test.properties @@ -9,4 +9,4 @@ unauthorizedIcatUsername=guest unauthorizedIcatPassword=guess # folder, where files should be downloaded to and uploaded from. This is relative to your home directory. -updownDir = icat/updown +updownDir = data/ids/updown diff --git a/src/test/resources/two.properties b/src/test/resources/two.properties index 2a8ce3d283e31370751573f893e288147e788c08..25f2c4e771339b38089feb39bc2cf10015ffffdd 100644 --- a/src/test/resources/two.properties +++ b/src/test/resources/two.properties @@ -1,9 +1,9 @@ plugin.zipMapper.class = org.icatproject.ids.storage_test.ZipMapper plugin.main.class = org.icatproject.ids.storage_test.MainFileStorage -plugin.main.dir = ${HOME}/ids/main/ +plugin.main.dir = ${HOME}/data/ids/main/ -cache.dir = ${HOME}/ids/cache/ +cache.dir = ${HOME}/data/ids/cache/ preparedCount = 100 processQueueIntervalSeconds = 2 rootUserNames db/root @@ -13,8 +13,8 @@ key wombat maxIdsInQuery = 1000 plugin.archive.class = org.icatproject.ids.storage_test.ArchiveFileStorage -plugin.archive.dir = ${HOME}/ids/archive/ -writeDelaySeconds = 6 +plugin.archive.dir = ${HOME}/data/ids/archive/ +delayDatasetWritesSeconds = 6 startArchivingLevel1024bytes = 5000000 stopArchivingLevel1024bytes = 4000000 storageUnit = dataset @@ -22,8 +22,8 @@ tidyBlockSize = 500 filesCheck.parallelCount = 2 filesCheck.gapSeconds = 3 -filesCheck.lastIdFile = ${HOME}/ids/lastIdFile -filesCheck.errorLog = ${HOME}/ids/errorLog +filesCheck.lastIdFile = ${HOME}/data/ids/lastIdFile +filesCheck.errorLog = ${HOME}/data/ids/errorLog linkLifetimeSeconds = 3600 diff --git a/src/test/resources/twodf.properties b/src/test/resources/twodf.properties index afd566cc69699048d2925ee493493b90d6a313f9..b98ea5468f7fab8b46d26196afd02b276ef461d7 100644 --- a/src/test/resources/twodf.properties +++ b/src/test/resources/twodf.properties @@ -1,9 +1,9 @@ plugin.zipMapper.class = org.icatproject.ids.storage_test.ZipMapper plugin.main.class = org.icatproject.ids.storage_test.MainFileStorage -plugin.main.dir = ${HOME}/ids/main/ +plugin.main.dir = ${HOME}/data/ids/main/ -cache.dir = ${HOME}/ids/cache/ +cache.dir = ${HOME}/data/ids/cache/ preparedCount = 100 processQueueIntervalSeconds = 2 rootUserNames db/root @@ -13,8 +13,8 @@ key wombat maxIdsInQuery = 1000 plugin.archive.class = org.icatproject.ids.storage_test.ArchiveFileStorage -plugin.archive.dir = ${HOME}/ids/archive/ -writeDelaySeconds = 6 +plugin.archive.dir = ${HOME}/data/ids/archive/ +delayDatafileOperationsSeconds = 6 startArchivingLevel1024bytes = 5000000 stopArchivingLevel1024bytes = 4000000 storageUnit = datafile @@ -22,8 +22,8 @@ tidyBlockSize = 500 filesCheck.parallelCount = 2 filesCheck.gapSeconds = 3 -filesCheck.lastIdFile = ${HOME}/ids/lastIdFile 
-filesCheck.errorLog = ${HOME}/ids/errorLog +filesCheck.lastIdFile = ${HOME}/data/ids/lastIdFile +filesCheck.errorLog = ${HOME}/data/ids/errorLog linkLifetimeSeconds = 3600 diff --git a/src/test/scripts/prepare_test.py b/src/test/scripts/prepare_test.py index 246110684ed5e2797c702ae032296b6ed6cd5398..8488160adabb8cdd0c15774d32a95e80b2fd8e76 100755 --- a/src/test/scripts/prepare_test.py +++ b/src/test/scripts/prepare_test.py @@ -1,56 +1,67 @@ #!/usr/bin/env python +from __future__ import print_function import sys -import shutil -import glob -from filecmp import cmp -import subprocess -from zipfile import ZipFile import os +from string import Template import tempfile +from filecmp import cmp +import glob +import shutil +from zipfile import ZipFile +import subprocess -if len(sys.argv) != 4: - print "Wrong number of arguments" - sys.exit(1) +if len(sys.argv) != 5: + raise RuntimeError("Wrong number of arguments") propFile = sys.argv[1] home = sys.argv[2] -icaturl = sys.argv[3] - -with tempfile.NamedTemporaryFile(delete=False) as f: - name = f.name -shutil.copy(propFile, name) -with open (name, "a") as f: - f.write("icat.url = " + icaturl) +containerHome = sys.argv[3] +icaturl = sys.argv[4] -if os.path.exists('src/test/install/run.properties') and cmp(name, 'src/test/install/run.properties'): - sys.exit(0) +subst = dict(os.environ) +subst['HOME'] = home -print "Installing with " + propFile +try: + tmpf = tempfile.NamedTemporaryFile(delete=False) + name = tmpf.name + with open(name, "wt") as f: + with open(propFile, "rt") as s: + t = Template(s.read()).substitute(subst) + print(t, end="", file=f) + print("icat.url = %s" % icaturl, file=f) + print("testHome = %s" % home, file=f) + if (os.path.exists("src/test/install/run.properties") and + cmp(name, "src/test/install/run.properties")): + sys.exit(0) + print("Installing with %s" % propFile) + shutil.copy(name, "src/test/install/run.properties") +finally: + os.remove(name) -shutil.copy(name, 'src/test/install/run.properties') -os.remove(name) +for f in glob.glob("src/test/install/*.war"): + os.remove(f) -with open('src/test/install/setup.properties', 'w') as f: - f.write('secure = true\n') - f.write('container = Glassfish\n') - f.write('home = ' + home + '\n') - f.write('port = 4848\n') - f.write('libraries=ids.storage_test*.jar') +with open("src/test/install/setup.properties", "wt") as f: + print("secure = true", file=f) + print("container = Glassfish", file=f) + print("home = %s" % containerHome, file=f) + print("port = 4848", file=f) + print("libraries = ids.storage_test*.jar", file=f) -with open("src/test/install/run.properties.example", "w") as f: +with open("src/test/install/run.properties.example", "wt") as f: pass -shutil.copy(glob.glob('target/ids.server-*.war')[0] , 'src/test/install/') -shutil.copy('src/main/scripts/setup', 'src/test/install/') -z = ZipFile(glob.glob("target/ids.server-*-distro.zip")[0]) -bytes = z.read('ids.server/setup_utils.py') -f = open('src/test/install/setup_utils.py', 'w') -f.write(bytes) -f.close() -z.close() - -p = subprocess.Popen(['./setup', "install"], cwd='src/test/install') -p.wait() +shutil.copy(glob.glob("target/ids.server-*.war")[0], "src/test/install/") +shutil.copy("src/main/scripts/setup", "src/test/install/") +with ZipFile(glob.glob("target/ids.server-*-distro.zip")[0]) as z: + with open("src/test/install/setup_utils.py", "wb") as f: + f.write(z.read("ids.server/setup_utils.py")) +with open("src/main/resources/logback.xml", "rt") as s: + with open("src/test/install/logback.xml", "wt") as f: + t = 
Template(s.read()).substitute(subst) + print(t, end="", file=f) +p = subprocess.Popen(["./setup", "install"], cwd="src/test/install") +p.wait()