Mirror of https://github.com/google/nomulus.git, synced 2025-07-21 10:16:07 +02:00
Use one SQL transaction per Datastore transaction in replay to SQL (#1268)
There was a subtle issue that we encountered in sandbox when using one transaction per file that was difficult to replicate. Basically:

1. Save a domain with dsData
2. Save the domain without dsData
3. Save the domain with the same dsData as step 1
4. Delete literally any object

If one performs steps 2-4 in the same transaction, Hibernate will throw an exception (cascade re-saving a cascade-deleted object). Note that step 4 is in fact necessary to reproduce the issue, yay Hibernate.

We will test this, and if one SQL transaction per Datastore transaction is too slow, we'll figure out ways to reduce the number of SQL transactions.
This commit is contained in:
parent c7d44a0ee4
commit 320d4db5fb
2 changed files with 49 additions and 3 deletions
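The change itself is small: the SQL transaction boundary moves from the file loop down to the per-Datastore-transaction loop. As a rough sketch reconstructed from the two hunks below (surrounding declarations and the rest of processFile are elided, so treat this as illustrative rather than the exact file contents):

    // Before: one SQL transaction per commit log file, so every Datastore
    // transaction in that file flushed through a single Hibernate session and
    // the steps 2-4 collision described above could occur.
    for (BlobInfo file : fileBatch) {
      jpaTm().transact(() -> processFile(file));
    }

    // After: processFile() runs outside a transaction and instead opens one
    // SQL transaction per replayed Datastore transaction.
    ImmutableList<ImmutableList<VersionedEntity>> allTransactions =
        CommitLogImports.loadEntitiesByTransaction(input);
    allTransactions.forEach(
        transaction -> jpaTm().transact(() -> replayTransaction(transaction)));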
@@ -189,7 +189,7 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
             searchStartTime.toString("yyyy-MM-dd HH"));
       }
       for (BlobInfo file : fileBatch) {
-        jpaTm().transact(() -> processFile(file));
+        processFile(file);
         filesProcessed++;
         if (clock.nowUtc().isAfter(replayTimeoutTime)) {
           return String.format(
@@ -205,10 +205,11 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
     // Load and process the Datastore transactions one at a time
     ImmutableList<ImmutableList<VersionedEntity>> allTransactions =
         CommitLogImports.loadEntitiesByTransaction(input);
-    allTransactions.forEach(this::replayTransaction);
+    allTransactions.forEach(
+        transaction -> jpaTm().transact(() -> replayTransaction(transaction)));
     // if we succeeded, set the last-seen time
     DateTime checkpoint = DateTime.parse(metadata.getName().substring(DIFF_FILE_PREFIX.length()));
-    SqlReplayCheckpoint.set(checkpoint);
+    jpaTm().transact(() -> SqlReplayCheckpoint.set(checkpoint));
     logger.atInfo().log(
         "Replayed %d transactions from commit log file %s with size %d B.",
         allTransactions.size(), metadata.getName(), metadata.getSize());
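A knock-on effect in this hunk: SqlReplayCheckpoint.set() previously ran inside the file-level transaction, so once that wrapper is gone the checkpoint write needs its own jpaTm().transact() call, just as each replayed transaction does.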
@@ -554,6 +554,51 @@ public class ReplayCommitLogsToSqlActionTest {
         });
   }
 
+  @Test
+  void testReplay_deleteAndResaveCascade_withOtherDeletion_noErrors() throws Exception {
+    createTld("tld");
+    DateTime now = fakeClock.nowUtc();
+    jpaTm().transact(() -> SqlReplayCheckpoint.set(now.minusMinutes(1).minusMillis(1)));
+
+    // Save a domain with a particular dsData in SQL as the base
+    ImmutableSet<DelegationSignerData> dsData =
+        ImmutableSet.of(DelegationSignerData.create(1, 2, 3, new byte[] {4, 5, 6}));
+    DomainBase domainWithDsData =
+        newDomainBase("example.tld").asBuilder().setDsData(dsData).build();
+    jpaTm().transact(() -> jpaTm().put(domainWithDsData));
+
+    // Replay a version of that domain without the dsData
+    Key<CommitLogManifest> manifestKeyOne =
+        CommitLogManifest.createKey(getBucketKey(1), now.minusMinutes(3));
+    DomainBase domainWithoutDsData =
+        domainWithDsData.asBuilder().setDsData(ImmutableSet.of()).build();
+    CommitLogMutation domainWithoutDsDataMutation =
+        ofyTm().transact(() -> CommitLogMutation.create(manifestKeyOne, domainWithoutDsData));
+
+    // Create an object (any object) to delete via replay to trigger Hibernate flush events
+    TestObject testObject = TestObject.create("foo", "bar");
+    jpaTm().transact(() -> jpaTm().put(testObject));
+
+    // Replay the original domain, with the original dsData
+    Key<CommitLogManifest> manifestKeyTwo =
+        CommitLogManifest.createKey(getBucketKey(1), now.minusMinutes(2));
+    CommitLogMutation domainWithOriginalDsDataMutation =
+        ofyTm().transact(() -> CommitLogMutation.create(manifestKeyTwo, domainWithDsData));
+
+    // If we try to perform all the events in one transaction (cascade-removal of the dsData,
+    // cascade-adding the dsData back in, and deleting any other random object), Hibernate will
+    // throw an exception
+    saveDiffFile(
+        gcsUtils,
+        createCheckpoint(now.minusMinutes(1)),
+        CommitLogManifest.create(
+            getBucketKey(1), now.minusMinutes(3), ImmutableSet.of(Key.create(testObject))),
+        domainWithoutDsDataMutation,
+        CommitLogManifest.create(getBucketKey(1), now.minusMinutes(2), ImmutableSet.of()),
+        domainWithOriginalDsDataMutation);
+    runAndAssertSuccess(now.minusMinutes(1), 1);
+  }
+
   private void runAndAssertSuccess(DateTime expectedCheckpointTime, int numFiles) {
     action.run();
     assertThat(response.getStatus()).isEqualTo(SC_OK);
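A note on the new test: deleting the unrelated TestObject mirrors step 4 of the commit message and is what forces Hibernate to flush the session and notice the cascade-deleted-then-re-saved dsData. Under the old one-transaction-per-file behavior this sequence would fail (typically surfacing as Hibernate's "deleted object would be re-saved by cascade" error; the exact exception type is our reading of the commit message, not something shown on this page), whereas with one SQL transaction per Datastore transaction each step flushes independently and runAndAssertSuccess passes.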