Unwrap the return value of loadAtPointInTime (#1205)
In SQL we do not need to wrap the return value in a Result. Unfortunately, a function cannot be overloaded based on its return type alone, so we renamed the existing function and created a new one with the old name that returns the resource directly. Once we no longer have any use for Datastore, we can delete the now-renamed function that returns a Result<? extends EppResource>.
parent df6513ef06
commit 3f01a90647
5 changed files with 87 additions and 83 deletions
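To make the rename concrete, here is a minimal, self-contained sketch of the resulting API shape (a hypothetical stand-in class with simplified signatures, not the actual Nomulus code): Java cannot overload on return type alone, so the unwrapped variant keeps the old name while the deferred variant gets a new name and returns a plain java.util.function.Supplier instead of Objectify's Result.

```java
import java.util.function.Supplier;

/** Hypothetical, simplified stand-in for EppResourceUtils, illustrating the new API shape. */
public final class PointInTimeExample {

  /**
   * New shape of loadAtPointInTime: the resource (or null) comes back directly,
   * with no Result wrapper, which is all the SQL backend needs.
   */
  static <T> T loadAtPointInTime(T resource, long timestampMillis) {
    // Placeholder: a real implementation would find the most recent revision at or
    // before the timestamp and project it forward.
    return resource;
  }

  /**
   * Deferred variant for callers that batch up many point-in-time loads; it keeps the
   * old lazy semantics, but as a plain Supplier rather than Objectify's Result.
   */
  static <T> Supplier<T> loadAtPointInTimeAsync(T resource, long timestampMillis) {
    return () -> loadAtPointInTime(resource, timestampMillis);
  }

  public static void main(String[] args) {
    String host = "ns1.cat.tld";
    // Eager call: no .now() unwrapping is needed any more.
    String eager = loadAtPointInTime(host, 0L);
    // Deferred call: nothing runs until get() is invoked.
    Supplier<String> deferred = loadAtPointInTimeAsync(host, 0L);
    System.out.println(eager + " / " + deferred.get());
  }

  private PointInTimeExample() {}
}
```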
EppResourceUtils.java

@@ -29,8 +29,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.flogger.FluentLogger;
 import com.googlecode.objectify.Key;
-import com.googlecode.objectify.Result;
-import com.googlecode.objectify.util.ResultNow;
 import google.registry.config.RegistryConfig;
 import google.registry.model.EppResource.BuilderWithTransferData;
 import google.registry.model.EppResource.ForeignKeyedEppResource;

@@ -54,6 +52,7 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.function.Function;
+import java.util.function.Supplier;
 import javax.annotation.Nullable;
 import javax.persistence.Query;
 import org.joda.time.DateTime;

@@ -299,43 +298,51 @@ public final class EppResourceUtils {
    * <p>When using the SQL backend (post-Registry-3.0-migration) this restriction goes away and
    * objects can be restored to any revision.
    *
-   * <p>TODO(b/177567432): Once Datastore is completely removed, remove the Result wrapping.
-   *
-   * @return an asynchronous operation returning resource at {@code timestamp} or {@code null} if
-   *     resource is deleted or not yet created
+   * @return the resource at {@code timestamp} or {@code null} if resource is deleted or not yet
+   *     created
    */
-  public static <T extends EppResource> Result<T> loadAtPointInTime(
+  public static <T extends EppResource> T loadAtPointInTime(
       final T resource, final DateTime timestamp) {
     // If we're before the resource creation time, don't try to find a "most recent revision".
     if (timestamp.isBefore(resource.getCreationTime())) {
-      return new ResultNow<>(null);
+      return null;
     }
     // If the resource was not modified after the requested time, then use it as-is, otherwise find
-    // the most recent revision asynchronously, and return an async result that wraps that revision
-    // and returns it projected forward to exactly the desired timestamp, or null if the resource is
-    // deleted at that timestamp.
-    final Result<T> loadResult =
+    // the most recent revision and project it forward to exactly the desired timestamp, or null if
+    // the resource is deleted at that timestamp.
+    T loadedResource =
         isAtOrAfter(timestamp, resource.getUpdateTimestamp().getTimestamp())
-            ? new ResultNow<>(resource)
+            ? resource
             : loadMostRecentRevisionAtTime(resource, timestamp);
-    return () -> {
-      T loadedResource = loadResult.now();
-      return (loadedResource == null)
-          ? null
-          : (isActive(loadedResource, timestamp)
-              ? cloneProjectedAtTime(loadedResource, timestamp)
-              : null);
-    };
+    return (loadedResource == null)
+        ? null
+        : (isActive(loadedResource, timestamp)
+            ? cloneProjectedAtTime(loadedResource, timestamp)
+            : null);
   }
 
   /**
-   * Returns an asynchronous result holding the most recent revision of a given EppResource before
-   * or at the provided timestamp, falling back to using the resource as-is if there are no
-   * revisions.
+   * Rewinds an {@link EppResource} object to a given point in time.
+   *
+   * <p>This method costs nothing if {@code resource} is already current. Otherwise it returns an
+   * async operation that performs a single fetch operation.
+   *
+   * @return an asynchronous operation returning resource at {@code timestamp} or {@code null} if
+   *     resource is deleted or not yet created
+   * @see #loadAtPointInTime(EppResource, DateTime)
+   */
+  public static <T extends EppResource> Supplier<T> loadAtPointInTimeAsync(
+      final T resource, final DateTime timestamp) {
+    return () -> loadAtPointInTime(resource, timestamp);
+  }
+
+  /**
+   * Returns the most recent revision of a given EppResource before or at the provided timestamp,
+   * falling back to using the resource as-is if there are no revisions.
    *
    * @see #loadAtPointInTime(EppResource, DateTime)
    */
-  private static <T extends EppResource> Result<T> loadMostRecentRevisionAtTime(
+  private static <T extends EppResource> T loadMostRecentRevisionAtTime(
      final T resource, final DateTime timestamp) {
    if (tm().isOfy()) {
      return loadMostRecentRevisionAtTimeDatastore(resource, timestamp);

@@ -345,46 +352,42 @@ public final class EppResourceUtils {
  }
 
  /**
-   * Returns an asynchronous result holding the most recent Datastore revision of a given
-   * EppResource before or at the provided timestamp using the EppResource revisions map, falling
-   * back to using the resource as-is if there are no revisions.
+   * Returns the most recent Datastore revision of a given EppResource before or at the provided
+   * timestamp using the EppResource revisions map, falling back to using the resource as-is if
+   * there are no revisions.
    *
-   * @see #loadAtPointInTime(EppResource, DateTime)
+   * @see #loadAtPointInTimeAsync(EppResource, DateTime)
   */
-  private static <T extends EppResource> Result<T> loadMostRecentRevisionAtTimeDatastore(
+  private static <T extends EppResource> T loadMostRecentRevisionAtTimeDatastore(
      final T resource, final DateTime timestamp) {
    final Key<T> resourceKey = Key.create(resource);
    final Key<CommitLogManifest> revision =
        findMostRecentDatastoreRevisionAtTime(resource, timestamp);
    if (revision == null) {
      logger.atSevere().log("No revision found for %s, falling back to resource.", resourceKey);
-      return new ResultNow<>(resource);
+      return resource;
    }
-    final Result<CommitLogMutation> mutationResult =
-        auditedOfy().load().key(CommitLogMutation.createKey(revision, resourceKey));
-    return () -> {
-      CommitLogMutation mutation = mutationResult.now();
-      if (mutation != null) {
-        return auditedOfy().load().fromEntity(mutation.getEntity());
-      }
-      logger.atSevere().log(
-          "Couldn't load mutation for revision at %s for %s, falling back to resource."
-              + " Revision: %s",
-          timestamp, resourceKey, revision);
-      return resource;
-    };
-  }
+    final CommitLogMutation mutation =
+        auditedOfy().load().key(CommitLogMutation.createKey(revision, resourceKey)).now();
+    if (mutation != null) {
+      return auditedOfy().load().fromEntity(mutation.getEntity());
+    }
+    logger.atSevere().log(
+        "Couldn't load mutation for revision at %s for %s, falling back to resource."
+            + " Revision: %s",
+        timestamp, resourceKey, revision);
+    return resource;
+  }
 
  /**
-   * Returns an asynchronous result holding the most recent SQL revision of a given EppResource
-   * before or at the provided timestamp using *History objects, falling back to using the resource
-   * as-is if there are no revisions.
+   * Returns the most recent SQL revision of a given EppResource before or at the provided timestamp
+   * using *History objects, falling back to using the resource as-is if there are no revisions.
   *
-   * @see #loadAtPointInTime(EppResource, DateTime)
+   * @see #loadAtPointInTimeAsync(EppResource, DateTime)
   */
-  @SuppressWarnings("unchecked")
-  private static <T extends EppResource> Result<T> loadMostRecentRevisionAtTimeSql(
+  private static <T extends EppResource> T loadMostRecentRevisionAtTimeSql(
      T resource, DateTime timestamp) {
+    @SuppressWarnings("unchecked")
    T resourceAtPointInTime =
        (T)
            HistoryEntryDao.loadHistoryObjectsForResource(

@@ -397,9 +400,9 @@ public final class EppResourceUtils {
      logger.atSevere().log(
          "Couldn't load resource at % for key %s, falling back to resource %s.",
          timestamp, resource.createVKey(), resource);
-      return new ResultNow<>(resource);
+      return resource;
    }
-    return new ResultNow<>(resourceAtPointInTime);
+    return resourceAtPointInTime;
  }
 
  @Nullable
RdeStagingMapper.java

@@ -17,6 +17,7 @@ package google.registry.rde;
 import static com.google.common.base.Strings.nullToEmpty;
 import static com.google.common.collect.ImmutableSet.toImmutableSet;
 import static google.registry.model.EppResourceUtils.loadAtPointInTime;
+import static google.registry.model.EppResourceUtils.loadAtPointInTimeAsync;
 import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
 
 import com.google.appengine.tools.mapreduce.Mapper;

@@ -26,7 +27,6 @@ import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.ImmutableSetMultimap;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-import com.googlecode.objectify.Result;
 import google.registry.model.EppResource;
 import google.registry.model.contact.ContactResource;
 import google.registry.model.domain.DomainBase;

@@ -36,7 +36,9 @@ import google.registry.model.registrar.Registrar;
 import google.registry.xml.ValidationMode;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
+import java.util.function.Supplier;
 import org.joda.time.DateTime;
 
 /** Mapper for {@link RdeStagingAction}. */

@@ -123,8 +125,8 @@ public final class RdeStagingMapper extends Mapper<EppResource, PendingDeposit,
            .collect(toImmutableSet());
 
    // Launch asynchronous fetches of point-in-time representations of resource.
-    ImmutableMap<DateTime, Result<EppResource>> resourceAtTimes =
-        ImmutableMap.copyOf(Maps.asMap(dates, input -> loadAtPointInTime(resource, input)));
+    ImmutableMap<DateTime, Supplier<EppResource>> resourceAtTimes =
+        ImmutableMap.copyOf(Maps.asMap(dates, input -> loadAtPointInTimeAsync(resource, input)));
 
    // Convert resource to an XML fragment for each watermark/mode pair lazily and cache the result.
    Fragmenter fragmenter = new Fragmenter(resourceAtTimes);

@@ -159,13 +161,13 @@ public final class RdeStagingMapper extends Mapper<EppResource, PendingDeposit,
  /** Loading cache that turns a resource into XML for the various points in time and modes. */
  private class Fragmenter {
    private final Map<WatermarkModePair, Optional<DepositFragment>> cache = new HashMap<>();
-    private final ImmutableMap<DateTime, Result<EppResource>> resourceAtTimes;
+    private final ImmutableMap<DateTime, Supplier<EppResource>> resourceAtTimes;
 
    long cacheHits = 0;
    long resourcesNotFound = 0;
    long resourcesFound = 0;
 
-    Fragmenter(ImmutableMap<DateTime, Result<EppResource>> resourceAtTimes) {
+    Fragmenter(ImmutableMap<DateTime, Supplier<EppResource>> resourceAtTimes) {
      this.resourceAtTimes = resourceAtTimes;
    }
 

@@ -175,7 +177,7 @@ public final class RdeStagingMapper extends Mapper<EppResource, PendingDeposit,
        cacheHits++;
        return result;
      }
-      EppResource resource = resourceAtTimes.get(watermark).now();
+      EppResource resource = resourceAtTimes.get(watermark).get();
      if (resource == null) {
        result = Optional.empty();
        cache.put(WatermarkModePair.create(watermark, RdeMode.FULL), result);

@@ -202,8 +204,9 @@ public final class RdeStagingMapper extends Mapper<EppResource, PendingDeposit,
                      host,
                      // Note that loadAtPointInTime() does cloneProjectedAtTime(watermark) for
                      // us.
-                      loadAtPointInTime(tm().loadByKey(host.getSuperordinateDomain()), watermark)
-                          .now())
+                      Objects.requireNonNull(
+                          loadAtPointInTime(
+                              tm().loadByKey(host.getSuperordinateDomain()), watermark)))
              : marshaller.marshalExternalHost(host));
      cache.put(WatermarkModePair.create(watermark, RdeMode.FULL), result);
      cache.put(WatermarkModePair.create(watermark, RdeMode.THIN), result);
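The RdeStagingMapper diff above swaps the map of Objectify Result handles for a map of Suppliers keyed by watermark. Below is a rough, self-contained sketch of that deferred-load pattern, with hypothetical names (WatermarkSupplierDemo, loadAtWatermarkAsync) standing in for the real mapper code and loadAtPointInTimeAsync, and Guava/Joda-Time used as in the surrounding code.

```java
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import java.util.function.Supplier;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

/** Hypothetical demo of deferring per-watermark loads behind Suppliers. */
public final class WatermarkSupplierDemo {

  /** Stand-in for loadAtPointInTimeAsync(resource, watermark): defers work until get(). */
  static Supplier<String> loadAtWatermarkAsync(String resource, DateTime watermark) {
    return () -> resource + "@" + watermark;
  }

  public static void main(String[] args) {
    String resource = "example.tld";
    ImmutableSet<DateTime> watermarks =
        ImmutableSet.of(
            new DateTime(2021, 6, 1, 0, 0, DateTimeZone.UTC),
            new DateTime(2021, 6, 2, 0, 0, DateTimeZone.UTC));

    // Build the map of deferred loads eagerly; none of the loads run yet.
    ImmutableMap<DateTime, Supplier<String>> resourceAtTimes =
        ImmutableMap.copyOf(Maps.asMap(watermarks, w -> loadAtWatermarkAsync(resource, w)));

    // Each load runs only when its Supplier is consumed.
    resourceAtTimes.forEach((watermark, supplier) -> System.out.println(supplier.get()));
  }

  private WatermarkSupplierDemo() {}
}
```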
GenerateZoneFilesAction.java

@@ -198,7 +198,7 @@ public class GenerateZoneFilesAction implements Runnable, JsonActionRunner.JsonA
  private void mapDomain(DomainBase domain) {
    // Domains never change their tld, so we can check if it's from the wrong tld right away.
    if (tlds.contains(domain.getTld())) {
-      domain = loadAtPointInTime(domain, exportTime).now();
+      domain = loadAtPointInTime(domain, exportTime);
      // A null means the domain was deleted (or not created) at this time.
      if (domain != null && domain.shouldPublishToDns()) {
        String stanza = domainStanza(domain, exportTime, dnsDefaultNsTtl, dnsDefaultDsTtl);

@@ -215,7 +215,7 @@ public class GenerateZoneFilesAction implements Runnable, JsonActionRunner.JsonA
    ImmutableSet<String> subordinateHosts = domain.getSubordinateHosts();
    if (!subordinateHosts.isEmpty()) {
      for (HostResource unprojectedHost : tm().loadByKeys(domain.getNameservers()).values()) {
-        HostResource host = loadAtPointInTime(unprojectedHost, exportTime).now();
+        HostResource host = loadAtPointInTime(unprojectedHost, exportTime);
        // A null means the host was deleted (or not created) at this time.
        if ((host != null) && subordinateHosts.contains(host.getHostName())) {
          String stanza = hostStanza(host, dnsDefaultATtl, domain.getTld());

@@ -290,7 +290,7 @@ public class GenerateZoneFilesAction implements Runnable, JsonActionRunner.JsonA
              domainLabel,
              dnsDefaultNsTtl.getStandardSeconds(),
              // Load the nameservers at the export time in case they've been renamed or deleted.
-              loadAtPointInTime(nameserver, exportTime).now().getHostName()));
+              loadAtPointInTime(nameserver, exportTime).getHostName()));
    }
    for (DelegationSignerData dsData : domain.getDsData()) {
      result.append(
EppPointInTimeTest.java

@@ -138,13 +138,13 @@ class EppPointInTimeTest {
 
    // Creation time has millisecond granularity due to isActive() check.
    tm().clearSessionCache();
-    assertThat(loadAtPointInTime(latest, timeAtCreate.minusMillis(1)).now()).isNull();
-    assertThat(loadAtPointInTime(latest, timeAtCreate).now()).isNotNull();
-    assertThat(loadAtPointInTime(latest, timeAtCreate.plusMillis(1)).now()).isNotNull();
+    assertThat(loadAtPointInTime(latest, timeAtCreate.minusMillis(1))).isNull();
+    assertThat(loadAtPointInTime(latest, timeAtCreate)).isNotNull();
+    assertThat(loadAtPointInTime(latest, timeAtCreate.plusMillis(1))).isNotNull();
 
    tm().clearSessionCache();
    assertAboutImmutableObjects()
-        .that(loadAtPointInTime(latest, timeAtCreate.plusDays(1)).now())
+        .that(loadAtPointInTime(latest, timeAtCreate.plusDays(1)))
        .hasFieldsEqualTo(domainAfterCreate);
 
    tm().clearSessionCache();

@@ -152,30 +152,30 @@ class EppPointInTimeTest {
      // Both updates happened on the same day. Since the revisions field has day granularity in
      // Datastore, the key to the first update should have been overwritten by the second, and its
      // timestamp rolled forward. So we have to fall back to the last revision before midnight.
-      assertThat(loadAtPointInTime(latest, timeAtFirstUpdate).now()).isEqualTo(domainAfterCreate);
+      assertThat(loadAtPointInTime(latest, timeAtFirstUpdate)).isEqualTo(domainAfterCreate);
    } else {
      // In SQL, however, we are not limited by the day granularity, so when we request the object
      // at timeAtFirstUpdate we should receive the object at that first update, even though the
      // second update occurred one millisecond later.
      assertAboutImmutableObjects()
-          .that(loadAtPointInTime(latest, timeAtFirstUpdate).now())
+          .that(loadAtPointInTime(latest, timeAtFirstUpdate))
          .hasFieldsEqualTo(domainAfterFirstUpdate);
    }
 
    tm().clearSessionCache();
    assertAboutImmutableObjects()
-        .that(loadAtPointInTime(latest, timeAtSecondUpdate).now())
+        .that(loadAtPointInTime(latest, timeAtSecondUpdate))
        .hasFieldsEqualTo(domainAfterSecondUpdate);
 
    tm().clearSessionCache();
    assertAboutImmutableObjects()
-        .that(loadAtPointInTime(latest, timeAtSecondUpdate.plusDays(1)).now())
+        .that(loadAtPointInTime(latest, timeAtSecondUpdate.plusDays(1)))
        .hasFieldsEqualTo(domainAfterSecondUpdate);
 
    // Deletion time has millisecond granularity due to isActive() check.
    tm().clearSessionCache();
-    assertThat(loadAtPointInTime(latest, timeAtDelete.minusMillis(1)).now()).isNotNull();
-    assertThat(loadAtPointInTime(latest, timeAtDelete).now()).isNull();
-    assertThat(loadAtPointInTime(latest, timeAtDelete.plusMillis(1)).now()).isNull();
+    assertThat(loadAtPointInTime(latest, timeAtDelete.minusMillis(1))).isNotNull();
+    assertThat(loadAtPointInTime(latest, timeAtDelete)).isNull();
+    assertThat(loadAtPointInTime(latest, timeAtDelete.plusMillis(1))).isNull();
  }
}
EppResourceUtilsTest.java

@@ -67,7 +67,7 @@ class EppResourceUtilsTest {
            newHostResource("ns1.cat.tld").asBuilder()
                .setCreationTimeForTest(clock.nowUtc())
                .build());
-    assertThat(loadAtPointInTime(host, clock.nowUtc().minus(Duration.millis(1))).now()).isNull();
+    assertThat(loadAtPointInTime(host, clock.nowUtc().minus(Duration.millis(1)))).isNull();
  }
 
  @TestOfyAndSql

@@ -78,7 +78,7 @@ class EppResourceUtilsTest {
            newHostResource("ns1.cat.tld").asBuilder()
                .setCreationTimeForTest(START_OF_TIME)
                .build());
-    assertThat(loadAtPointInTime(host, clock.nowUtc()).now()).isEqualTo(host);
+    assertThat(loadAtPointInTime(host, clock.nowUtc())).isEqualTo(host);
  }
 
  @TestOfyOnly

@@ -99,8 +99,7 @@ class EppResourceUtilsTest {
                .build());
    // Load at the point in time just before the latest update; the floor entry of the revisions
    // map should point to the manifest for the first save, so we should get the old host.
-    assertThat(loadAtPointInTime(currentHost, clock.nowUtc().minusMillis(1)).now())
-        .isEqualTo(oldHost);
+    assertThat(loadAtPointInTime(currentHost, clock.nowUtc().minusMillis(1))).isEqualTo(oldHost);
  }
 
  @TestOfyOnly

@@ -120,7 +119,7 @@ class EppResourceUtilsTest {
    // Load at the point in time just before the latest update; the old host is not recoverable
    // (revisions map link is broken, and guessing using the oldest revision map entry finds the
    // same broken link), so just returns the current host.
-    assertThat(loadAtPointInTime(host, clock.nowUtc().minusMillis(1)).now()).isEqualTo(host);
+    assertThat(loadAtPointInTime(host, clock.nowUtc().minusMillis(1))).isEqualTo(host);
  }
 
  @TestOfyOnly

@@ -141,8 +140,7 @@ class EppResourceUtilsTest {
    // Load at the point in time before the first update; there will be no floor entry for the
    // revisions map, so give up and return the oldest revision entry's mutation value (the old host
    // data).
-    assertThat(loadAtPointInTime(currentHost, clock.nowUtc().minusDays(2)).now())
-        .isEqualTo(oldHost);
+    assertThat(loadAtPointInTime(currentHost, clock.nowUtc().minusDays(2))).isEqualTo(oldHost);
  }
 
  @TestOfyOnly

@@ -157,7 +155,7 @@ class EppResourceUtilsTest {
    // Load at the point in time before the first save; there will be no floor entry for the
    // revisions map. Since the oldest revision entry is the only (i.e. current) revision, return
    // the resource.
-    assertThat(loadAtPointInTime(host, clock.nowUtc().minusMillis(1)).now()).isEqualTo(host);
+    assertThat(loadAtPointInTime(host, clock.nowUtc().minusMillis(1))).isEqualTo(host);
  }
 
  @TestOfyOnly

@@ -175,7 +173,7 @@ class EppResourceUtilsTest {
    // Even though there is no revision, make a best effort guess to use the oldest revision.
    assertThat(
            loadAtPointInTime(host, clock.nowUtc().minus(Duration.standardDays(32)))
-                .now()
                .getUpdateTimestamp()
                .getTimestamp())
        .isEqualTo(host.getRevisions().firstKey());