diff --git a/core/src/main/java/google/registry/model/domain/token/AllocationToken.java b/core/src/main/java/google/registry/model/domain/token/AllocationToken.java
index b23212da6..38394e3a6 100644
--- a/core/src/main/java/google/registry/model/domain/token/AllocationToken.java
+++ b/core/src/main/java/google/registry/model/domain/token/AllocationToken.java
@@ -57,6 +57,7 @@ import javax.persistence.Column;
 import javax.persistence.EnumType;
 import javax.persistence.Enumerated;
 import javax.persistence.Table;
+import javax.persistence.Transient;
 import org.joda.time.DateTime;
 
 /** An entity representing an allocation token. */
@@ -105,7 +106,8 @@ public class AllocationToken extends BackupGroupRoot implements Buildable, Datas
   @javax.persistence.Id @Id String token;
 
   /** The key of the history entry for which the token was used. Null if not yet used. */
-  @Nullable @Index VKey redemptionHistoryEntry;
+  // TODO(b/172848495): Remove the "Transient" when we can finally persist and restore this.
+  @Transient @Nullable @Index VKey redemptionHistoryEntry;
 
   /** The fully-qualified domain name that this token is limited to, if any. */
   @Nullable @Index String domainName;
diff --git a/core/src/main/java/google/registry/model/ofy/CommitLoggedWork.java b/core/src/main/java/google/registry/model/ofy/CommitLoggedWork.java
index 26f110df6..0e761c632 100644
--- a/core/src/main/java/google/registry/model/ofy/CommitLoggedWork.java
+++ b/core/src/main/java/google/registry/model/ofy/CommitLoggedWork.java
@@ -161,6 +161,7 @@ class CommitLoggedWork implements Runnable {
                 .addAll(untouchedRootsWithTouchedChildren)
                 .build())
         .now();
+    ReplayQueue.addInTests(info);
   }
 
   /** Check that the timestamp of each BackupGroupRoot is in the past. */
diff --git a/core/src/main/java/google/registry/model/ofy/ReplayQueue.java b/core/src/main/java/google/registry/model/ofy/ReplayQueue.java
new file mode 100644
index 000000000..75874dafa
--- /dev/null
+++ b/core/src/main/java/google/registry/model/ofy/ReplayQueue.java
@@ -0,0 +1,46 @@
+// Copyright 2020 The Nomulus Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package google.registry.model.ofy;
+
+import google.registry.config.RegistryEnvironment;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+/**
+ * Implements simplified datastore to SQL transaction replay.
+ *
+ * <p>This code is to be removed when the actual replay cron job is implemented.
+ */
+public class ReplayQueue {
+
+  static ConcurrentLinkedQueue<TransactionInfo> queue =
+      new ConcurrentLinkedQueue<TransactionInfo>();
+
+  static void addInTests(TransactionInfo info) {
+    if (RegistryEnvironment.get() == RegistryEnvironment.UNITTEST) {
+      queue.add(info);
+    }
+  }
+
+  public static void replay() {
+    TransactionInfo info;
+    while ((info = queue.poll()) != null) {
+      info.saveToJpa();
+    }
+  }
+
+  public static void clear() {
+    queue.clear();
+  }
+}
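Note: the queue above is only fed in unit tests (ReplayQueue.addInTests(), called from CommitLoggedWork, checks RegistryEnvironment.UNITTEST), and replay() simply drains each queued TransactionInfo into SQL. As a rough sketch of how a test could drive it by hand, assuming a placeholder entity ("someEntity" is not a name from this change):

    // Illustrative only; "someEntity" is a stand-in for any dual-written test entity.
    tm().transact(() -> tm().put(someEntity));   // the Ofy commit enqueues a TransactionInfo
    ReplayQueue.replay();                        // drains the queue, calling saveToJpa() per commit
    jpaTm().transact(() -> jpaTm().load(someEntity));  // the entity should now be visible in SQL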
diff --git a/core/src/main/java/google/registry/model/ofy/TransactionInfo.java b/core/src/main/java/google/registry/model/ofy/TransactionInfo.java
index 33be2e018..115bd690c 100644
--- a/core/src/main/java/google/registry/model/ofy/TransactionInfo.java
+++ b/core/src/main/java/google/registry/model/ofy/TransactionInfo.java
@@ -21,17 +21,25 @@ import static com.google.common.collect.Maps.filterValues;
 import static com.google.common.collect.Maps.toMap;
 import static google.registry.model.ofy.CommitLogBucket.getArbitraryBucketId;
 import static google.registry.model.ofy.ObjectifyService.ofy;
+import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.googlecode.objectify.Key;
+import google.registry.persistence.VKey;
+import google.registry.schema.replay.DatastoreEntity;
+import google.registry.schema.replay.SqlEntity;
 import java.util.Map;
 import org.joda.time.DateTime;
 
 /** Metadata for an {@link Ofy} transaction that saves commit logs. */
 class TransactionInfo {
 
-  private enum Delete { SENTINEL }
+  @VisibleForTesting
+  enum Delete {
+    SENTINEL
+  }
 
   /** Logical "now" of the transaction. */
   DateTime transactionTime;
@@ -92,4 +100,49 @@ class TransactionInfo {
         .filter(not(Delete.SENTINEL::equals))
         .collect(toImmutableSet());
   }
+
+  // Mapping from class name to "weight" (which in this case is the order in which the class must
+  // be "put" in a transaction with respect to instances of other classes).  Lower weight classes
+  // are put first; by default all classes have a weight of zero.
+  static final ImmutableMap<String, Integer> CLASS_WEIGHTS =
+      ImmutableMap.of(
+          "HistoryEntry", -1,
+          "DomainBase", 1);
+
+  // The beginning of the range of weights reserved for delete.  This must be greater than any of
+  // the values in CLASS_WEIGHTS by enough overhead to accommodate any negative values in it.
+  @VisibleForTesting static final int DELETE_RANGE = Integer.MAX_VALUE / 2;
+
+  /** Returns the weight of the entity type in the map entry. */
+  @VisibleForTesting
+  static int getWeight(ImmutableMap.Entry<Key<?>, Object> entry) {
+    int weight = CLASS_WEIGHTS.getOrDefault(entry.getKey().getKind(), 0);
+    return entry.getValue().equals(Delete.SENTINEL) ? DELETE_RANGE - weight : weight;
+  }
+
+  private static int compareByWeight(
+      ImmutableMap.Entry<Key<?>, Object> a, ImmutableMap.Entry<Key<?>, Object> b) {
+    return getWeight(a) - getWeight(b);
+  }
+
+  void saveToJpa() {
+    // Sort the changes into an order that will work for insertion into the database.
+    jpaTm()
+        .transact(
+            () -> {
+              changesBuilder.build().entrySet().stream()
+                  .sorted(TransactionInfo::compareByWeight)
+                  .forEach(
+                      entry -> {
+                        if (entry.getValue().equals(Delete.SENTINEL)) {
+                          jpaTm().delete(VKey.from(entry.getKey()));
+                        } else {
+                          for (SqlEntity entity :
+                              ((DatastoreEntity) entry.getValue()).toSqlEntities()) {
+                            jpaTm().put(entity);
+                          }
+                        }
+                      });
+            });
+  }
 }
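The weight scheme in CLASS_WEIGHTS/getWeight() above orders a replayed transaction so that parent rows are inserted before their children and deleted after them, with all puts sorting ahead of all deletes. A minimal sketch of the values it produces, using only the constants defined above (illustrative, not part of the change):

    int putHistoryEntry = CLASS_WEIGHTS.getOrDefault("HistoryEntry", 0);  // -1, applied first
    int putAnythingElse = CLASS_WEIGHTS.getOrDefault("Registrar", 0);     //  0
    int putDomainBase = CLASS_WEIGHTS.getOrDefault("DomainBase", 0);      //  1
    int deleteDomainBase = DELETE_RANGE - putDomainBase;                  //  DELETE_RANGE - 1
    int deleteHistoryEntry = DELETE_RANGE - putHistoryEntry;              //  DELETE_RANGE + 1, applied last

Entries are applied in ascending weight, which is exactly the ordering the test fragment further below asserts.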
diff --git a/core/src/main/java/google/registry/model/registry/label/BaseDomainLabelList.java b/core/src/main/java/google/registry/model/registry/label/BaseDomainLabelList.java
index 9dbe1f9fd..ac67bfe1b 100644
--- a/core/src/main/java/google/registry/model/registry/label/BaseDomainLabelList.java
+++ b/core/src/main/java/google/registry/model/registry/label/BaseDomainLabelList.java
@@ -79,7 +79,7 @@ public abstract class BaseDomainLabelList, R extends Dom
   // set to the timestamp when the list is created. In Datastore, we have two fields and the
   // lastUpdateTime is set to the current timestamp when creating and updating a list. So, we use
   // lastUpdateTime as the creation_timestamp column during the dual-write phase for compatibility.
-  @Column(name = "creation_timestamp", nullable = false)
+  @Column(name = "creation_timestamp")
   DateTime lastUpdateTime;
 
   /** Returns the ID of this revision, or throws if null. */
diff --git a/core/src/test/java/google/registry/flows/FlowTestCase.java b/core/src/test/java/google/registry/flows/FlowTestCase.java
index f110289a9..eb4e35a96 100644
--- a/core/src/test/java/google/registry/flows/FlowTestCase.java
+++ b/core/src/test/java/google/registry/flows/FlowTestCase.java
@@ -59,6 +59,7 @@ import java.util.Map;
 import javax.annotation.Nullable;
 import org.joda.time.DateTime;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Order;
 import org.junit.jupiter.api.extension.RegisterExtension;
 
 /**
@@ -80,12 +81,6 @@ public abstract class FlowTestCase {
     SUPERUSER
   }
 
-  @RegisterExtension
-  final AppEngineExtension appEngine =
-      AppEngineExtension.builder().withDatastoreAndCloudSql().withTaskQueue().build();
-
-  @RegisterExtension final InjectExtension inject = new InjectExtension();
-
   protected EppLoader eppLoader;
   protected SessionMetadata sessionMetadata;
   protected FakeClock clock = new FakeClock(DateTime.now(UTC));
@@ -95,14 +90,24 @@ public abstract class FlowTestCase {
 
   private EppMetric.Builder eppMetricBuilder;
 
+  // Set the clock for transactional flows.  We have to order this before the AppEngineExtension
+  // which populates data (and may do so with clock-dependent commit logs if mixed with
+  // ReplayExtension).
+  @Order(value = Order.DEFAULT - 1)
+  @RegisterExtension
+  final InjectExtension inject =
+      new InjectExtension().withStaticFieldOverride(Ofy.class, "clock", clock);
+
+  @RegisterExtension
+  final AppEngineExtension appEngine =
+      AppEngineExtension.builder().withDatastoreAndCloudSql().withTaskQueue().build();
+
   @BeforeEach
   public void beforeEachFlowTestCase() {
     sessionMetadata = new HttpSessionMetadata(new FakeHttpSession());
     sessionMetadata.setClientId("TheRegistrar");
     sessionMetadata.setServiceExtensionUris(ProtocolDefinition.getVisibleServiceExtensionUris());
     ofy().saveWithoutBackup().entity(new ClaimsListSingleton()).now();
-    // For transactional flows
-    inject.setStaticField(Ofy.class, "clock", clock);
   }
 
   protected void removeServiceExtensionUri(String uri) {
diff --git a/core/src/test/java/google/registry/flows/domain/DomainCreateFlowTest.java b/core/src/test/java/google/registry/flows/domain/DomainCreateFlowTest.java
index 3dbf750bd..3226d1c68 100644
--- a/core/src/test/java/google/registry/flows/domain/DomainCreateFlowTest.java
+++ b/core/src/test/java/google/registry/flows/domain/DomainCreateFlowTest.java
@@ -163,6 +163,7 @@ import google.registry.model.reporting.DomainTransactionRecord.TransactionReport
 import google.registry.model.reporting.HistoryEntry;
 import google.registry.monitoring.whitebox.EppMetric;
 import google.registry.persistence.VKey;
+import google.registry.testing.ReplayExtension;
 import google.registry.testing.TaskQueueHelper.TaskMatcher;
 import java.math.BigDecimal;
 import java.util.Map;
@@ -171,7 +172,9 @@ import org.joda.money.Money;
 import org.joda.time.DateTime;
 import org.joda.time.Duration;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Order;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
 /** Unit tests for {@link DomainCreateFlow}. */
 class DomainCreateFlowTest extends ResourceFlowTestCase {
 
@@ -180,6 +183,10 @@ class DomainCreateFlowTest extends ResourceFlowTestCase, Object> actions =
+        ImmutableMap.of(
+            Key.create(HistoryEntry.class, 100), TransactionInfo.Delete.SENTINEL,
+            Key.create(HistoryEntry.class, 200), "fake history entry",
+            Key.create(Registrar.class, 300), "fake registrar");
+    ImmutableMap expectedValues =
+        ImmutableMap.of(100L, TransactionInfo.DELETE_RANGE + 1, 200L, -1, 300L, 0);
+
+    for (ImmutableMap.Entry, Object> entry : actions.entrySet()) {
+      assertThat(TransactionInfo.getWeight(entry))
+          .isEqualTo(expectedValues.get(entry.getKey().getId()));
+    }
+  }
+}
diff --git a/core/src/test/java/google/registry/testing/AppEngineExtension.java b/core/src/test/java/google/registry/testing/AppEngineExtension.java
index bc22ca122..06f895ac8 100644
--- a/core/src/test/java/google/registry/testing/AppEngineExtension.java
+++ b/core/src/test/java/google/registry/testing/AppEngineExtension.java
@@ -477,6 +477,24 @@ public final class AppEngineExtension implements BeforeEachCallback, AfterEachCa
   public void afterEach(ExtensionContext context) throws Exception {
     checkArgumentNotNull(context, "The ExtensionContext must not be null");
     try {
+      // If there is a replay extension, we'll want to call its replayToSql() method.
+      //
+      // We have to provide this hook here for ReplayExtension instead of relying on
+      // ReplayExtension's afterEach() method because of ordering and the conflation of environment
+      // initialization and basic entity initialization.
+      //
+      // ReplayExtension's beforeEach() has to be called before this so that the entities that we
+      // initialize (e.g. "TheRegistrar") also get replayed.  But that means that ReplayExtension's
+      // afterEach() won't be called until after ours.  Since we tear down the datastore and SQL
+      // database in our own afterEach(), ReplayExtension's afterEach() would fail if we let the
+      // replay happen there.
+      ReplayExtension replayer =
+          (ReplayExtension)
+              context.getStore(ExtensionContext.Namespace.GLOBAL).get(ReplayExtension.class);
+      if (replayer != null) {
+        replayer.replayToSql();
+      }
+
       if (withCloudSql) {
         if (enableJpaEntityCoverageCheck) {
           jpaIntegrationWithCoverageExtension.afterEach(context);
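For orientation: a flow test opts into replay by registering a ReplayExtension, which puts itself into the GLOBAL extension store in its beforeEach() and is then looked up by the AppEngineExtension.afterEach() hook above. A sketch of that wiring; the class name, field name, and order value here are illustrative, not the actual DomainCreateFlowTest code:

    class SomeFlowTest extends ResourceFlowTestCase {

      // Must be ordered before AppEngineExtension so that the entities it seeds get replayed too.
      @Order(value = Order.DEFAULT - 2)
      @RegisterExtension
      final ReplayExtension replayExtension = new ReplayExtension(clock);
    }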
"TheRegistrar") also get replayed. But that means that ReplayExtension's + // afterEach() won't be called until after ours. Since we tear down the datastore and SQL + // database in our own afterEach(), ReplayExtension's afterEach() would fail if we let the + // replay happen there. + ReplayExtension replayer = + (ReplayExtension) + context.getStore(ExtensionContext.Namespace.GLOBAL).get(ReplayExtension.class); + if (replayer != null) { + replayer.replayToSql(); + } + if (withCloudSql) { if (enableJpaEntityCoverageCheck) { jpaIntegrationWithCoverageExtension.afterEach(context); diff --git a/core/src/test/java/google/registry/testing/DatastoreHelper.java b/core/src/test/java/google/registry/testing/DatastoreHelper.java index 23b3b8408..fbc2f1383 100644 --- a/core/src/test/java/google/registry/testing/DatastoreHelper.java +++ b/core/src/test/java/google/registry/testing/DatastoreHelper.java @@ -122,6 +122,14 @@ import org.joda.time.DateTimeZone; /** Static utils for setting up test resources. */ public class DatastoreHelper { + // The following two fields are injected by ReplayExtension. + + // If this is true, all of the methods that save to the datastore do so with backup. + private static boolean alwaysSaveWithBackup; + + // If the clock is defined, it will always be advanced by one millsecond after a transaction. + private static FakeClock clock; + private static final Supplier DEFAULT_PREMIUM_LIST_CONTENTS = memoize( () -> @@ -132,6 +140,20 @@ public class DatastoreHelper { DatastoreHelper.class, "default_premium_list_testdata.csv")), String.class)); + public static void setAlwaysSaveWithBackup(boolean enable) { + alwaysSaveWithBackup = enable; + } + + public static void setClock(FakeClock fakeClock) { + clock = fakeClock; + } + + private static void maybeAdvanceClock() { + if (clock != null) { + clock.advanceOneMilli(); + } + } + public static HostResource newHostResource(String hostName) { return newHostResourceWithRoid(hostName, generateNewContactHostRoid()); } @@ -312,6 +334,7 @@ public class DatastoreHelper { // the // transaction time is set correctly. tm().transactNew(() -> LordnTaskUtils.enqueueDomainBaseTask(persistedDomain)); + maybeAdvanceClock(); return persistedDomain; } @@ -365,14 +388,25 @@ public class DatastoreHelper { PremiumListRevision revision = PremiumListRevision.create(premiumList, entries.keySet()); if (tm().isOfy()) { - tm().putAllWithoutBackup( - ImmutableList.of( - premiumList.asBuilder().setRevision(Key.create(revision)).build(), revision)); - tm().putAllWithoutBackup( - parentPremiumListEntriesOnRevision(entries.values(), Key.create(revision))); + ImmutableList premiumLists = + ImmutableList.of( + premiumList.asBuilder().setRevision(Key.create(revision)).build(), revision); + ImmutableSet entriesOnRevision = + parentPremiumListEntriesOnRevision(entries.values(), Key.create(revision)); + if (alwaysSaveWithBackup) { + tm().transact( + () -> { + tm().putAll(premiumLists); + tm().putAll(entriesOnRevision); + }); + } else { + tm().putAllWithoutBackup(premiumLists); + tm().putAllWithoutBackup(entriesOnRevision); + } } else { tm().transact(() -> tm().insert(premiumList)); } + maybeAdvanceClock(); // The above premiumList is in the session cache and it is different from the corresponding // entity stored in Datastore because it has some @Ignore fields set dedicated for SQL. 
This // breaks the assumption we have in our application code, see @@ -934,7 +968,7 @@ public class DatastoreHelper { private static void saveResource(R resource, boolean wantBackup) { if (tm().isOfy()) { - Saver saver = wantBackup ? ofy().save() : ofy().saveWithoutBackup(); + Saver saver = wantBackup || alwaysSaveWithBackup ? ofy().save() : ofy().saveWithoutBackup(); saver.entity(resource); if (resource instanceof EppResource) { EppResource eppResource = (EppResource) resource; @@ -962,6 +996,7 @@ public class DatastoreHelper { .that(resource) .isNotInstanceOf(Buildable.Builder.class); tm().transact(() -> saveResource(resource, wantBackup)); + maybeAdvanceClock(); // Force the session cache to be cleared so that when we read the resource back, we read from // Datastore and not from the session cache. This is needed to trigger Objectify's load process // (unmarshalling entity protos to POJOs, nulling out empty collections, calling @OnLoad @@ -984,6 +1019,7 @@ public class DatastoreHelper { tm().put(resource); } }); + maybeAdvanceClock(); tm().clearSessionCache(); return transactIfJpaTm(() -> tm().load(resource)); } @@ -1001,6 +1037,7 @@ public class DatastoreHelper { // Persist domains ten at a time, to avoid exceeding the entity group limit. for (final List chunk : Iterables.partition(resources, 10)) { tm().transact(() -> chunk.forEach(resource -> saveResource(resource, wantBackup))); + maybeAdvanceClock(); } // Force the session to be cleared so that when we read it back, we read from Datastore // and not from the transaction's session cache. @@ -1035,6 +1072,7 @@ public class DatastoreHelper { ofyTmOrDoNothing( () -> tm().put(ForeignKeyIndex.create(resource, resource.getDeletionTime()))); }); + maybeAdvanceClock(); tm().clearSessionCache(); return transactIfJpaTm(() -> tm().load(resource)); } @@ -1128,7 +1166,15 @@ public class DatastoreHelper { * ForeignKeyedEppResources. */ public static ImmutableList persistSimpleResources(final Iterable resources) { - tm().transact(() -> tm().putAllWithoutBackup(ImmutableList.copyOf(resources))); + tm().transact( + () -> { + if (alwaysSaveWithBackup) { + tm().putAll(ImmutableList.copyOf(resources)); + } else { + tm().putAllWithoutBackup(ImmutableList.copyOf(resources)); + } + }); + maybeAdvanceClock(); // Force the session to be cleared so that when we read it back, we read from Datastore // and not from the transaction's session cache. tm().clearSessionCache(); @@ -1136,7 +1182,11 @@ public class DatastoreHelper { } public static void deleteResource(final Object resource) { - transactIfJpaTm(() -> tm().deleteWithoutBackup(resource)); + if (alwaysSaveWithBackup) { + tm().transact(() -> tm().delete(resource)); + } else { + transactIfJpaTm(() -> tm().deleteWithoutBackup(resource)); + } // Force the session to be cleared so that when we read it back, we read from Datastore and // not from the transaction's session cache. tm().clearSessionCache(); @@ -1144,15 +1194,18 @@ public class DatastoreHelper { /** Force the create and update timestamps to get written into the resource. **/ public static R cloneAndSetAutoTimestamps(final R resource) { + R result; if (tm().isOfy()) { - return tm().transact(() -> ofy().load().fromEntity(ofy().save().toEntity(resource))); + result = tm().transact(() -> ofy().load().fromEntity(ofy().save().toEntity(resource))); } else { // We have to separate the read and write operation into different transactions // otherwise JPA would just return the input entity instead of actually creating a // clone. 
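The maybeAdvanceClock() calls sprinkled through DatastoreHelper above exist because commit logs are keyed by transaction time: if two helper transactions commit at the same FakeClock instant, the replayed transactions are no longer cleanly ordered, and checks like CommitLoggedWork's "timestamp of each BackupGroupRoot is in the past" depend on time moving forward. The pattern in isolation (entity names are placeholders):

    FakeClock clock = new FakeClock(DateTime.now(UTC));
    tm().transact(() -> tm().put(firstEntity));    // commit log written at time T
    clock.advanceOneMilli();
    tm().transact(() -> tm().put(secondEntity));   // commit log written at T plus one millisecond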
diff --git a/core/src/test/java/google/registry/testing/ReplayExtension.java b/core/src/test/java/google/registry/testing/ReplayExtension.java
new file mode 100644
index 000000000..f41ecffd0
--- /dev/null
+++ b/core/src/test/java/google/registry/testing/ReplayExtension.java
@@ -0,0 +1,58 @@
+// Copyright 2020 The Nomulus Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package google.registry.testing;
+
+import google.registry.model.ofy.ReplayQueue;
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+/**
+ * A JUnit extension that replays datastore transactions against postgresql.
+ *
+ * <p>This extension must be ordered before AppEngineExtension so that the test entities saved in
+ * that extension are also replayed.  If AppEngineExtension is not used,
+ * JpaTransactionManagerExtension must be, and this extension should be ordered _after_
+ * JpaTransactionManagerExtension so that writes to SQL work.
+ */
+public class ReplayExtension implements BeforeEachCallback, AfterEachCallback {
+
+  FakeClock clock;
+
+  public ReplayExtension(FakeClock clock) {
+    this.clock = clock;
+  }
+
+  @Override
+  public void beforeEach(ExtensionContext context) {
+    DatastoreHelper.setClock(clock);
+    DatastoreHelper.setAlwaysSaveWithBackup(true);
+    ReplayQueue.clear();
+    context.getStore(ExtensionContext.Namespace.GLOBAL).put(ReplayExtension.class, this);
+  }
+
+  @Override
+  public void afterEach(ExtensionContext context) {
+    // This ensures that we do the replay even if we're not called from AppEngineExtension.  It
+    // should be safe to call replayToSql() twice, as the replay queue should be empty the second
+    // time.
+    replayToSql();
+  }
+
+  public void replayToSql() {
+    DatastoreHelper.setAlwaysSaveWithBackup(false);
+    ReplayQueue.replay();
+  }
+}
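As the javadoc above notes, a suite that uses the JPA extension directly instead of AppEngineExtension needs the SQL side initialized before the replay extension runs. A sketch of that ordering; how the JPA extension is constructed is elided, and the class and field names are illustrative:

    class SomeSqlBackedTest {
      final FakeClock clock = new FakeClock(DateTime.now(UTC));

      // SQL must be ready first so that ReplayQueue.replay() can write through jpaTm().
      @Order(1)
      @RegisterExtension
      final JpaTransactionManagerExtension jpaExtension = newJpaExtensionForTest();  // placeholder factory

      @Order(2)
      @RegisterExtension
      final ReplayExtension replayExtension = new ReplayExtension(clock);
    }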
diff --git a/core/src/test/resources/google/registry/flows/domain/domain_create_response_eap_fee.xml b/core/src/test/resources/google/registry/flows/domain/domain_create_response_eap_fee.xml
index ae8ff2993..469be5356 100644
--- a/core/src/test/resources/google/registry/flows/domain/domain_create_response_eap_fee.xml
+++ b/core/src/test/resources/google/registry/flows/domain/domain_create_response_eap_fee.xml
@@ -15,7 +15,7 @@
       USD
       26.00
-      100.00
+      100.00
diff --git a/core/src/test/resources/google/registry/flows/domain/domain_create_response_premium_eap.xml b/core/src/test/resources/google/registry/flows/domain/domain_create_response_premium_eap.xml
index 4a54740ab..2854b478c 100644
--- a/core/src/test/resources/google/registry/flows/domain/domain_create_response_premium_eap.xml
+++ b/core/src/test/resources/google/registry/flows/domain/domain_create_response_premium_eap.xml
@@ -15,7 +15,7 @@
       USD
       200.00
-      100.00
+      100.00
diff --git a/db/src/main/resources/sql/er_diagram/brief_er_diagram.html b/db/src/main/resources/sql/er_diagram/brief_er_diagram.html
index 5ccee4c97..5ed89b338 100644
--- a/db/src/main/resources/sql/er_diagram/brief_er_diagram.html
+++ b/db/src/main/resources/sql/er_diagram/brief_er_diagram.html
@@ -261,11 +261,11 @@ td.section {
       generated on
-      2020-11-13 19:34:54.398919
+      2020-11-16 16:45:08.581361
       last flyway file
-      V76__change_history_nullability.sql
+      V77__fixes_for_replay.sql
@@ -284,7 +284,7 @@ td.section {
       generated on
-      2020-11-13 19:34:54.398919
+      2020-11-16 16:45:08.581361
diff --git a/db/src/main/resources/sql/er_diagram/full_er_diagram.html b/db/src/main/resources/sql/er_diagram/full_er_diagram.html
index 6b3ada54f..eeacb66c1 100644
--- a/db/src/main/resources/sql/er_diagram/full_er_diagram.html
+++ b/db/src/main/resources/sql/er_diagram/full_er_diagram.html
@@ -261,11 +261,11 @@ td.section {
       generated on
-      2020-11-13 19:34:52.404634
+      2020-11-16 16:45:06.707088
       last flyway file
-      V76__change_history_nullability.sql
+      V77__fixes_for_replay.sql
@@ -284,7 +284,7 @@ td.section {
       generated on
-      2020-11-13 19:34:52.404634
+      2020-11-16 16:45:06.707088
@@ -5690,70 +5690,70 @@
       premiumlist_7c3ea68b
       public.PremiumList  [table]
       revision_id  bigserial not null  auto-incremented
       creation_timestamp
-      timestamptz not null
+      timestamptz
       name  text not null
       bloom_filter  bytea not null
       currency  text not null
       premiumentry_b0060b91:w->premiumlist_7c3ea68b:e
       fko0gw90lpo1tuee56l0nb6y6g5
@@ -11600,7 +11600,7 @@
       creation_timestamp
-      timestamptz not null
+      timestamptz
diff --git a/db/src/main/resources/sql/flyway.txt b/db/src/main/resources/sql/flyway.txt
index ab8ef0821..829d26a1b 100644
--- a/db/src/main/resources/sql/flyway.txt
+++ b/db/src/main/resources/sql/flyway.txt
@@ -74,3 +74,4 @@ V73__singleton_entities.sql
 V74__sql_replay_checkpoint.sql
 V75__add_grace_period_history.sql
 V76__change_history_nullability.sql
+V77__fixes_for_replay.sql
diff --git a/db/src/main/resources/sql/flyway/V77__fixes_for_replay.sql b/db/src/main/resources/sql/flyway/V77__fixes_for_replay.sql
new file mode 100644
index 000000000..e6f1af05f
--- /dev/null
+++ b/db/src/main/resources/sql/flyway/V77__fixes_for_replay.sql
@@ -0,0 +1,15 @@
+-- Copyright 2020 The Nomulus Authors. All Rights Reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+ALTER TABLE "PremiumList" ALTER COLUMN creation_timestamp DROP NOT NULL;
diff --git a/db/src/main/resources/sql/schema/db-schema.sql.generated b/db/src/main/resources/sql/schema/db-schema.sql.generated
index 54a316071..5828406cc 100644
--- a/db/src/main/resources/sql/schema/db-schema.sql.generated
+++ b/db/src/main/resources/sql/schema/db-schema.sql.generated
@@ -22,7 +22,6 @@
         discount_premiums boolean not null,
         discount_years int4 not null,
         domain_name text,
-        redemption_history_entry text,
         token_status_transitions hstore,
         token_type text,
         primary key (token)
@@ -524,7 +523,7 @@
     create table "PremiumList" (
        revision_id bigserial not null,
-       creation_timestamp timestamptz not null,
+       creation_timestamp timestamptz,
        name text not null,
        bloom_filter bytea not null,
        currency text not null,
@@ -635,7 +634,7 @@
     create table "ReservedList" (
        revision_id bigserial not null,
-       creation_timestamp timestamptz not null,
+       creation_timestamp timestamptz,
        name text not null,
        should_publish boolean not null,
        primary key (revision_id)
diff --git a/db/src/main/resources/sql/schema/nomulus.golden.sql b/db/src/main/resources/sql/schema/nomulus.golden.sql
index 05de4c4d4..482cec6ff 100644
--- a/db/src/main/resources/sql/schema/nomulus.golden.sql
+++ b/db/src/main/resources/sql/schema/nomulus.golden.sql
@@ -686,7 +686,7 @@ CREATE TABLE public."PremiumEntry" (
 
 CREATE TABLE public."PremiumList" (
     revision_id bigint NOT NULL,
-    creation_timestamp timestamp with time zone NOT NULL,
+    creation_timestamp timestamp with time zone,
     name text NOT NULL,
    bloom_filter bytea NOT NULL,
    currency text NOT NULL