Mirror of https://github.com/google/nomulus.git, synced 2025-05-13 07:57:13 +02:00
Refactor Guava functional methods to use lambdas

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=177027488
This commit is contained in: parent 2ae496bfce, commit bbe2584da4
47 changed files with 478 additions and 647 deletions
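Every hunk below applies the same mechanical rewrite: an anonymous class implementing a single-abstract-method interface (Guava's Function and Supplier, Objectify's Work and VoidWork, and similar) is collapsed into a lambda or a method reference. A minimal, self-contained sketch of the transformation, using only JDK types rather than the project's Objectify wrappers (the class and names below are illustrative, not taken from the repository):

import java.util.concurrent.Callable;

public class LambdaRefactorSketch {
  // Before: anonymous class implementing the single-method interface.
  static Callable<Integer> oldStyle() {
    return new Callable<Integer>() {
      @Override
      public Integer call() {
        return 42;
      }
    };
  }

  // After: a lambda expresses the same body with no wrapper boilerplate.
  static Callable<Integer> newStyle() {
    return () -> 42;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(oldStyle().call()); // 42
    System.out.println(newStyle().call()); // 42
  }
}

The bodies and behavior are unchanged; only the wrapper boilerplate disappears, which is where the commit's net deletion of lines comes from.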
@@ -23,7 +23,6 @@ import static google.registry.util.DateTimeUtils.earliestOf;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
 import com.googlecode.objectify.Key;
-import com.googlecode.objectify.Work;
 import google.registry.model.ofy.CommitLogBucket;
 import google.registry.model.ofy.CommitLogCheckpoint;
 import google.registry.model.ofy.CommitLogManifest;
@@ -116,15 +115,14 @@ class CommitLogCheckpointStrategy {
   @VisibleForTesting
   ImmutableMap<Integer, DateTime> readBucketTimestamps() {
     // Use a fresh session cache so that we get the latest data from Datastore.
-    return ofy.doWithFreshSessionCache(new Work<ImmutableMap<Integer, DateTime>>() {
-      @Override
-      public ImmutableMap<Integer, DateTime> run() {
-        ImmutableMap.Builder<Integer, DateTime> results = new ImmutableMap.Builder<>();
-        for (CommitLogBucket bucket : CommitLogBucket.loadAllBuckets()) {
-          results.put(bucket.getBucketNum(), bucket.getLastWrittenTime());
-        }
-        return results.build();
-      }});
+    return ofy.doWithFreshSessionCache(
+        () -> {
+          ImmutableMap.Builder<Integer, DateTime> results = new ImmutableMap.Builder<>();
+          for (CommitLogBucket bucket : CommitLogBucket.loadAllBuckets()) {
+            results.put(bucket.getBucketNum(), bucket.getLastWrittenTime());
+          }
+          return results.build();
+        });
   }

   /**
@ -30,7 +30,6 @@ import com.google.auto.value.AutoValue;
|
|||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMultiset;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.mapreduce.MapreduceRunner;
|
||||
import google.registry.mapreduce.inputs.CommitLogManifestInput;
|
||||
|
@ -282,39 +281,36 @@ public final class DeleteOldCommitLogsAction implements Runnable {
|
|||
return;
|
||||
}
|
||||
|
||||
DeletionResult deletionResult = ofy().transactNew(new Work<DeletionResult>() {
|
||||
@Override
|
||||
public DeletionResult run() {
|
||||
CommitLogManifest manifest = ofy().load().key(manifestKey).now();
|
||||
// It is possible that the same manifestKey was run twice, if a shard had to be restarted
|
||||
// or some weird failure. If this happens, we want to exit immediately.
|
||||
// Note that this can never happen in dryRun.
|
||||
if (manifest == null) {
|
||||
return DeletionResult.create(DeletionResult.Status.ALREADY_DELETED, 0);
|
||||
}
|
||||
// Doing a sanity check on the date. This is the only place we use the CommitLogManifest,
|
||||
// so maybe removing this test will improve performance. However, unless it's proven that
|
||||
// the performance boost is significant (and we've tested this enough to be sure it never
|
||||
// happens) - the safety of "let's not delete stuff we need from prod" is more important.
|
||||
if (manifest.getCommitTime().isAfter(deletionThreshold)) {
|
||||
return DeletionResult.create(DeletionResult.Status.AFTER_THRESHOLD, 0);
|
||||
}
|
||||
Iterable<Key<CommitLogMutation>> commitLogMutationKeys = ofy().load()
|
||||
.type(CommitLogMutation.class)
|
||||
.ancestor(manifestKey)
|
||||
.keys()
|
||||
.iterable();
|
||||
ImmutableList<Key<?>> keysToDelete = ImmutableList.<Key<?>>builder()
|
||||
.addAll(commitLogMutationKeys)
|
||||
.add(manifestKey)
|
||||
.build();
|
||||
// Normally in a dry run we would log the entities that would be deleted, but those can
|
||||
// number in the millions so we skip the logging.
|
||||
if (!isDryRun) {
|
||||
ofy().deleteWithoutBackup().keys(keysToDelete);
|
||||
}
|
||||
return DeletionResult.create(DeletionResult.Status.SUCCESS, keysToDelete.size());
|
||||
DeletionResult deletionResult = ofy().transactNew(() -> {
|
||||
CommitLogManifest manifest = ofy().load().key(manifestKey).now();
|
||||
// It is possible that the same manifestKey was run twice, if a shard had to be restarted
|
||||
// or some weird failure. If this happens, we want to exit immediately.
|
||||
// Note that this can never happen in dryRun.
|
||||
if (manifest == null) {
|
||||
return DeletionResult.create(DeletionResult.Status.ALREADY_DELETED, 0);
|
||||
}
|
||||
// Doing a sanity check on the date. This is the only place we use the CommitLogManifest,
|
||||
// so maybe removing this test will improve performance. However, unless it's proven that
|
||||
// the performance boost is significant (and we've tested this enough to be sure it never
|
||||
// happens) - the safety of "let's not delete stuff we need from prod" is more important.
|
||||
if (manifest.getCommitTime().isAfter(deletionThreshold)) {
|
||||
return DeletionResult.create(DeletionResult.Status.AFTER_THRESHOLD, 0);
|
||||
}
|
||||
Iterable<Key<CommitLogMutation>> commitLogMutationKeys = ofy().load()
|
||||
.type(CommitLogMutation.class)
|
||||
.ancestor(manifestKey)
|
||||
.keys()
|
||||
.iterable();
|
||||
ImmutableList<Key<?>> keysToDelete = ImmutableList.<Key<?>>builder()
|
||||
.addAll(commitLogMutationKeys)
|
||||
.add(manifestKey)
|
||||
.build();
|
||||
// Normally in a dry run we would log the entities that would be deleted, but those can
|
||||
// number in the millions so we skip the logging.
|
||||
if (!isDryRun) {
|
||||
ofy().deleteWithoutBackup().keys(keysToDelete);
|
||||
}
|
||||
return DeletionResult.create(DeletionResult.Status.SUCCESS, keysToDelete.size());
|
||||
});
|
||||
|
||||
switch (deletionResult.status()) {
|
||||
|
|
|
@ -38,7 +38,6 @@ import com.google.common.collect.ImmutableSet;
|
|||
import com.google.common.collect.Range;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.mapreduce.MapreduceRunner;
|
||||
import google.registry.mapreduce.inputs.NullInput;
|
||||
import google.registry.model.EppResource;
|
||||
|
@ -151,89 +150,86 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
|
|||
getContext().incrementCounter("Recurring billing events ignored");
|
||||
return;
|
||||
}
|
||||
int billingEventsSaved = 0;
|
||||
int numBillingEventsSaved = 0;
|
||||
try {
|
||||
billingEventsSaved = ofy().transactNew(new Work<Integer>() {
|
||||
@Override
|
||||
public Integer run() {
|
||||
ImmutableSet.Builder<OneTime> syntheticOneTimesBuilder =
|
||||
new ImmutableSet.Builder<>();
|
||||
final Registry tld = Registry.get(getTldFromDomainName(recurring.getTargetId()));
|
||||
numBillingEventsSaved = ofy().transactNew(() -> {
|
||||
ImmutableSet.Builder<OneTime> syntheticOneTimesBuilder =
|
||||
new ImmutableSet.Builder<>();
|
||||
final Registry tld = Registry.get(getTldFromDomainName(recurring.getTargetId()));
|
||||
|
||||
// Determine the complete set of times at which this recurring event should occur
|
||||
// (up to and including the runtime of the mapreduce).
|
||||
Iterable<DateTime> eventTimes =
|
||||
recurring.getRecurrenceTimeOfYear().getInstancesInRange(Range.closed(
|
||||
recurring.getEventTime(),
|
||||
earliestOf(recurring.getRecurrenceEndTime(), executeTime)));
|
||||
// Determine the complete set of times at which this recurring event should occur
|
||||
// (up to and including the runtime of the mapreduce).
|
||||
Iterable<DateTime> eventTimes =
|
||||
recurring.getRecurrenceTimeOfYear().getInstancesInRange(Range.closed(
|
||||
recurring.getEventTime(),
|
||||
earliestOf(recurring.getRecurrenceEndTime(), executeTime)));
|
||||
|
||||
// Convert these event times to billing times
|
||||
final ImmutableSet<DateTime> billingTimes =
|
||||
getBillingTimesInScope(eventTimes, cursorTime, executeTime, tld);
|
||||
// Convert these event times to billing times
|
||||
final ImmutableSet<DateTime> billingTimes =
|
||||
getBillingTimesInScope(eventTimes, cursorTime, executeTime, tld);
|
||||
|
||||
Key<? extends EppResource> domainKey = recurring.getParentKey().getParent();
|
||||
Iterable<OneTime> oneTimesForDomain =
|
||||
ofy().load().type(OneTime.class).ancestor(domainKey);
|
||||
Key<? extends EppResource> domainKey = recurring.getParentKey().getParent();
|
||||
Iterable<OneTime> oneTimesForDomain =
|
||||
ofy().load().type(OneTime.class).ancestor(domainKey);
|
||||
|
||||
// Determine the billing times that already have OneTime events persisted.
|
||||
ImmutableSet<DateTime> existingBillingTimes =
|
||||
getExistingBillingTimes(oneTimesForDomain, recurring);
|
||||
// Determine the billing times that already have OneTime events persisted.
|
||||
ImmutableSet<DateTime> existingBillingTimes =
|
||||
getExistingBillingTimes(oneTimesForDomain, recurring);
|
||||
|
||||
ImmutableSet.Builder<HistoryEntry> historyEntriesBuilder =
|
||||
new ImmutableSet.Builder<>();
|
||||
// Create synthetic OneTime events for all billing times that do not yet have an event
|
||||
// persisted.
|
||||
for (DateTime billingTime : difference(billingTimes, existingBillingTimes)) {
|
||||
// Construct a new HistoryEntry that parents over the OneTime
|
||||
HistoryEntry historyEntry = new HistoryEntry.Builder()
|
||||
.setBySuperuser(false)
|
||||
.setClientId(recurring.getClientId())
|
||||
.setModificationTime(ofy().getTransactionTime())
|
||||
.setParent(domainKey)
|
||||
.setPeriod(Period.create(1, YEARS))
|
||||
.setReason("Domain autorenewal by ExpandRecurringBillingEventsAction")
|
||||
.setRequestedByRegistrar(false)
|
||||
.setType(DOMAIN_AUTORENEW)
|
||||
.setDomainTransactionRecords(
|
||||
ImmutableSet.of(
|
||||
DomainTransactionRecord.create(
|
||||
tld.getTldStr(),
|
||||
// We report this when the autorenew grace period ends
|
||||
billingTime,
|
||||
TransactionReportField.netRenewsFieldFromYears(1),
|
||||
1)))
|
||||
.build();
|
||||
historyEntriesBuilder.add(historyEntry);
|
||||
ImmutableSet.Builder<HistoryEntry> historyEntriesBuilder =
|
||||
new ImmutableSet.Builder<>();
|
||||
// Create synthetic OneTime events for all billing times that do not yet have an event
|
||||
// persisted.
|
||||
for (DateTime billingTime : difference(billingTimes, existingBillingTimes)) {
|
||||
// Construct a new HistoryEntry that parents over the OneTime
|
||||
HistoryEntry historyEntry = new HistoryEntry.Builder()
|
||||
.setBySuperuser(false)
|
||||
.setClientId(recurring.getClientId())
|
||||
.setModificationTime(ofy().getTransactionTime())
|
||||
.setParent(domainKey)
|
||||
.setPeriod(Period.create(1, YEARS))
|
||||
.setReason("Domain autorenewal by ExpandRecurringBillingEventsAction")
|
||||
.setRequestedByRegistrar(false)
|
||||
.setType(DOMAIN_AUTORENEW)
|
||||
.setDomainTransactionRecords(
|
||||
ImmutableSet.of(
|
||||
DomainTransactionRecord.create(
|
||||
tld.getTldStr(),
|
||||
// We report this when the autorenew grace period ends
|
||||
billingTime,
|
||||
TransactionReportField.netRenewsFieldFromYears(1),
|
||||
1)))
|
||||
.build();
|
||||
historyEntriesBuilder.add(historyEntry);
|
||||
|
||||
DateTime eventTime = billingTime.minus(tld.getAutoRenewGracePeriodLength());
|
||||
// Determine the cost for a one-year renewal.
|
||||
Money renewCost = getDomainRenewCost(recurring.getTargetId(), eventTime, 1);
|
||||
syntheticOneTimesBuilder.add(new BillingEvent.OneTime.Builder()
|
||||
.setBillingTime(billingTime)
|
||||
.setClientId(recurring.getClientId())
|
||||
.setCost(renewCost)
|
||||
.setEventTime(eventTime)
|
||||
.setFlags(union(recurring.getFlags(), Flag.SYNTHETIC))
|
||||
.setParent(historyEntry)
|
||||
.setPeriodYears(1)
|
||||
.setReason(recurring.getReason())
|
||||
.setSyntheticCreationTime(executeTime)
|
||||
.setCancellationMatchingBillingEvent(Key.create(recurring))
|
||||
.setTargetId(recurring.getTargetId())
|
||||
.build());
|
||||
}
|
||||
Set<HistoryEntry> historyEntries = historyEntriesBuilder.build();
|
||||
Set<OneTime> syntheticOneTimes = syntheticOneTimesBuilder.build();
|
||||
if (!isDryRun) {
|
||||
ImmutableSet<ImmutableObject> entitiesToSave =
|
||||
new ImmutableSet.Builder<ImmutableObject>()
|
||||
.addAll(historyEntries)
|
||||
.addAll(syntheticOneTimes)
|
||||
.build();
|
||||
ofy().save().entities(entitiesToSave).now();
|
||||
}
|
||||
return syntheticOneTimes.size();
|
||||
DateTime eventTime = billingTime.minus(tld.getAutoRenewGracePeriodLength());
|
||||
// Determine the cost for a one-year renewal.
|
||||
Money renewCost = getDomainRenewCost(recurring.getTargetId(), eventTime, 1);
|
||||
syntheticOneTimesBuilder.add(new OneTime.Builder()
|
||||
.setBillingTime(billingTime)
|
||||
.setClientId(recurring.getClientId())
|
||||
.setCost(renewCost)
|
||||
.setEventTime(eventTime)
|
||||
.setFlags(union(recurring.getFlags(), Flag.SYNTHETIC))
|
||||
.setParent(historyEntry)
|
||||
.setPeriodYears(1)
|
||||
.setReason(recurring.getReason())
|
||||
.setSyntheticCreationTime(executeTime)
|
||||
.setCancellationMatchingBillingEvent(Key.create(recurring))
|
||||
.setTargetId(recurring.getTargetId())
|
||||
.build());
|
||||
}
|
||||
Set<HistoryEntry> historyEntries = historyEntriesBuilder.build();
|
||||
Set<OneTime> syntheticOneTimes = syntheticOneTimesBuilder.build();
|
||||
if (!isDryRun) {
|
||||
ImmutableSet<ImmutableObject> entitiesToSave =
|
||||
new ImmutableSet.Builder<ImmutableObject>()
|
||||
.addAll(historyEntries)
|
||||
.addAll(syntheticOneTimes)
|
||||
.build();
|
||||
ofy().save().entities(entitiesToSave).now();
|
||||
}
|
||||
return syntheticOneTimes.size();
|
||||
});
|
||||
} catch (Throwable t) {
|
||||
logger.severefmt(
|
||||
|
@ -243,10 +239,10 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
|
|||
throw t;
|
||||
}
|
||||
if (!isDryRun) {
|
||||
getContext().incrementCounter("Saved OneTime billing events", billingEventsSaved);
|
||||
getContext().incrementCounter("Saved OneTime billing events", numBillingEventsSaved);
|
||||
} else {
|
||||
getContext().incrementCounter(
|
||||
"Generated OneTime billing events (dry run)", billingEventsSaved);
|
||||
"Generated OneTime billing events (dry run)", numBillingEventsSaved);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -36,7 +36,6 @@ import com.google.appengine.tools.mapreduce.Reducer;
 import com.google.appengine.tools.mapreduce.ReducerInput;
 import com.google.appengine.tools.mapreduce.inputs.DatastoreKeyInput;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import com.googlecode.objectify.Key;
@@ -141,9 +140,10 @@ public class VerifyEntityIntegrityAction implements Runnable {
     ImmutableSet.Builder<Input<? extends Object>> builder =
         new ImmutableSet.Builder<Input<? extends Object>>()
             .add(EppResourceInputs.createIndexInput());
-    for (Class<?> clazz : RESOURCE_CLASSES) {
-      builder.add(new DatastoreKeyInput(getKind(clazz), NUM_SHARDS));
-    }
+    RESOURCE_CLASSES
+        .stream()
+        .map(clazz -> new DatastoreKeyInput(getKind(clazz), NUM_SHARDS))
+        .forEach(builder::add);
     return builder.build();
   }

@@ -265,17 +265,7 @@ public class VerifyEntityIntegrityAction implements Runnable {
             .getTransferData()
             .getServerApproveEntities()
             .stream()
-            .map(
-                new Function<
-                    Key<? extends TransferServerApproveEntity>,
-                    Key<TransferServerApproveEntity>>() {
-                  @SuppressWarnings("unchecked")
-                  @Override
-                  public Key<TransferServerApproveEntity> apply(
-                      Key<? extends TransferServerApproveEntity> key) {
-                    return (Key<TransferServerApproveEntity>) key;
-                  }
-                })
+            .map(VerifyEntityIntegrityMapper::castTransferServerApproveEntityKey)
             .collect(toImmutableSet()));
     verifyExistence(key, domain.getApplication());
     verifyExistence(key, domain.getAutorenewBillingEvent());
@@ -306,6 +296,12 @@ public class VerifyEntityIntegrityAction implements Runnable {
     }
   }

+  @SuppressWarnings("unchecked")
+  private static Key<TransferServerApproveEntity> castTransferServerApproveEntityKey(
+      Key<? extends TransferServerApproveEntity> key) {
+    return (Key<TransferServerApproveEntity>) key;
+  }
+
   private void mapForeignKeyIndex(ForeignKeyIndex<?> fki) {
     Key<ForeignKeyIndex<?>> fkiKey = Key.create(fki);
     @SuppressWarnings("cast")
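The VerifyEntityIntegrityAction hunks above show a second idiom used in this commit: an anonymous Function whose only job is an unchecked cast is pulled out into a private @SuppressWarnings helper so the stream can use a method reference. A generic sketch of that idiom (TypedKey and the other names are stand-ins invented for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class CastHelperSketch {
  // Hypothetical key type standing in for Objectify's Key<T>.
  static class TypedKey<T> {}

  // Keeping the unchecked cast in one annotated helper lets call sites use a
  // method reference instead of a multi-line anonymous Function.
  @SuppressWarnings("unchecked")
  private static TypedKey<Number> castKey(TypedKey<? extends Number> key) {
    return (TypedKey<Number>) key;
  }

  static List<TypedKey<Number>> narrow(List<TypedKey<? extends Number>> keys) {
    return keys.stream().map(CastHelperSketch::castKey).collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<TypedKey<? extends Number>> keys =
        Arrays.asList(new TypedKey<Integer>(), new TypedKey<Long>());
    System.out.println(narrow(keys).size()); // 2
  }
}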
@@ -55,7 +55,6 @@ import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableTable;
 import com.google.common.io.BaseEncoding;
-import com.google.common.util.concurrent.AsyncFunction;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -454,15 +453,7 @@ public class BigqueryConnection implements AutoCloseable {
             .setQuery(new JobConfigurationQuery()
                 .setQuery(querySql)
                 .setDefaultDataset(getDataset())));
-    return transform(
-        runJobToCompletion(job),
-        new Function<Job, ImmutableTable<Integer, TableFieldSchema, Object>>() {
-          @Override
-          public ImmutableTable<Integer, TableFieldSchema, Object> apply(Job job) {
-            return getQueryResults(job);
-          }
-        },
-        directExecutor());
+    return transform(runJobToCompletion(job), this::getQueryResults, directExecutor());
   }

   /**
@@ -593,12 +584,7 @@ public class BigqueryConnection implements AutoCloseable {
     DestinationTable tempTable = buildTemporaryTable().build();
     return transformAsync(
         query(querySql, tempTable),
-        new AsyncFunction<DestinationTable, String>() {
-          @Override
-          public ListenableFuture<String> apply(DestinationTable tempTable) {
-            return extractTable(tempTable, destinationUri, destinationFormat, printHeader);
-          }
-        },
+        tempTable1 -> extractTable(tempTable1, destinationUri, destinationFormat, printHeader),
         directExecutor());
   }

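The BigqueryConnection hunks rely on Guava's ListenableFuture combinators accepting lambdas and method references directly. A small sketch of the same transform-with-directExecutor() shape, assuming a Guava version (roughly 23+) where the executor argument of Futures.transform is required; the pool and values here are invented for illustration:

import static com.google.common.util.concurrent.MoreExecutors.directExecutor;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class TransformSketch {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService pool =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    ListenableFuture<Integer> length = pool.submit(() -> "nomulus".length());
    // A lambda (or method reference) now stands in for the anonymous Function,
    // and the lightweight continuation runs on directExecutor().
    ListenableFuture<String> rendered =
        Futures.transform(length, n -> "length=" + n, directExecutor());
    System.out.println(rendered.get()); // length=7
    pool.shutdown();
  }
}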
@@ -15,11 +15,11 @@
 package google.registry.export;

 import static com.google.common.collect.ImmutableSortedSet.toImmutableSortedSet;
-import static google.registry.model.EntityClasses.CLASS_TO_KIND_FUNCTION;
 import static google.registry.util.TypeUtils.hasAnnotation;

 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Ordering;
+import com.googlecode.objectify.Key;
 import google.registry.model.EntityClasses;
 import google.registry.model.annotations.NotBackedUp;
 import google.registry.model.annotations.ReportedOn;
@@ -36,7 +36,7 @@ public final class ExportConstants {
         .stream()
         .filter(hasAnnotation(VirtualEntity.class).negate())
         .filter(hasAnnotation(NotBackedUp.class).negate())
-        .map(CLASS_TO_KIND_FUNCTION)
+        .map(Key::getKind)
         .collect(toImmutableSortedSet(Ordering.natural()));
   }

@@ -46,7 +46,7 @@ public final class ExportConstants {
         .stream()
         .filter(hasAnnotation(ReportedOn.class))
         .filter(hasAnnotation(VirtualEntity.class).negate())
-        .map(CLASS_TO_KIND_FUNCTION)
+        .map(Key::getKind)
         .collect(toImmutableSortedSet(Ordering.natural()));
   }
 }
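The ExportConstants change swaps the Guava Function constant CLASS_TO_KIND_FUNCTION for the Key::getKind method reference while keeping the ImmutableSortedSet stream collector. A sketch of the same stream-and-collect shape against plain JDK classes (toImmutableSortedSet is Guava's collector, available since Guava 21; the class list is illustrative):

import static com.google.common.collect.ImmutableSortedSet.toImmutableSortedSet;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Ordering;

public class CollectorSketch {
  public static void main(String[] args) {
    ImmutableSet<Class<?>> classes = ImmutableSet.of(String.class, Integer.class, Long.class);
    // Method references and Guava's stream collectors replace Guava Function
    // constants plus FluentIterable chains.
    ImmutableSortedSet<String> simpleNames =
        classes.stream()
            .map(Class::getSimpleName)
            .collect(toImmutableSortedSet(Ordering.natural()));
    System.out.println(simpleNames); // [Integer, Long, String]
  }
}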
@ -30,7 +30,6 @@ import com.google.common.collect.ImmutableList;
|
|||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.flows.EppException.AuthorizationErrorException;
|
||||
import google.registry.flows.EppException.InvalidAuthorizationInformationErrorException;
|
||||
import google.registry.flows.EppException.ObjectDoesNotExistException;
|
||||
|
@ -179,31 +178,28 @@ public final class ResourceFlowUtils {
|
|||
EppException failfastException =
|
||||
ofy()
|
||||
.doTransactionless(
|
||||
new Work<EppException>() {
|
||||
@Override
|
||||
public EppException run() {
|
||||
final ForeignKeyIndex<R> fki =
|
||||
ForeignKeyIndex.load(resourceClass, targetId, now);
|
||||
if (fki == null) {
|
||||
return new ResourceDoesNotExistException(resourceClass, targetId);
|
||||
}
|
||||
/* Query for the first few linked domains, and if found, actually load them. The
|
||||
* query is eventually consistent and so might be very stale, but the direct
|
||||
* load will not be stale, just non-transactional. If we find at least one
|
||||
* actual reference then we can reliably fail. If we don't find any, we can't
|
||||
* trust the query and need to do the full mapreduce.
|
||||
*/
|
||||
Iterable<Key<DomainBase>> keys =
|
||||
queryForLinkedDomains(fki.getResourceKey(), now)
|
||||
.limit(FAILFAST_CHECK_COUNT)
|
||||
.keys();
|
||||
Predicate<DomainBase> predicate =
|
||||
domain ->
|
||||
getPotentialReferences.apply(domain).contains(fki.getResourceKey());
|
||||
return ofy().load().keys(keys).values().stream().anyMatch(predicate)
|
||||
? new ResourceToDeleteIsReferencedException()
|
||||
: null;
|
||||
() -> {
|
||||
final ForeignKeyIndex<R> fki =
|
||||
ForeignKeyIndex.load(resourceClass, targetId, now);
|
||||
if (fki == null) {
|
||||
return new ResourceDoesNotExistException(resourceClass, targetId);
|
||||
}
|
||||
/* Query for the first few linked domains, and if found, actually load them. The
|
||||
* query is eventually consistent and so might be very stale, but the direct
|
||||
* load will not be stale, just non-transactional. If we find at least one
|
||||
* actual reference then we can reliably fail. If we don't find any, we can't
|
||||
* trust the query and need to do the full mapreduce.
|
||||
*/
|
||||
Iterable<Key<DomainBase>> keys =
|
||||
queryForLinkedDomains(fki.getResourceKey(), now)
|
||||
.limit(FAILFAST_CHECK_COUNT)
|
||||
.keys();
|
||||
Predicate<DomainBase> predicate =
|
||||
domain ->
|
||||
getPotentialReferences.apply(domain).contains(fki.getResourceKey());
|
||||
return ofy().load().keys(keys).values().stream().anyMatch(predicate)
|
||||
? new ResourceToDeleteIsReferencedException()
|
||||
: null;
|
||||
});
|
||||
if (failfastException != null) {
|
||||
throw failfastException;
|
||||
|
|
|
@@ -23,7 +23,6 @@ import static google.registry.model.ofy.ObjectifyService.ofy;
 import static google.registry.util.DateTimeUtils.isBeforeOrAt;

 import com.googlecode.objectify.Key;
-import com.googlecode.objectify.Work;
 import google.registry.flows.EppException;
 import google.registry.flows.EppException.AuthorizationErrorException;
 import google.registry.flows.EppException.ObjectDoesNotExistException;
@@ -131,11 +130,7 @@ public class PollAckFlow implements TransactionalFlow {
     // acked, then we return a special status code indicating that. Note that the query will
     // include the message being acked.

-    int messageCount = ofy().doTransactionless(new Work<Integer>() {
-      @Override
-      public Integer run() {
-        return getPollMessagesQuery(clientId, now).count();
-      }});
+    int messageCount = ofy().doTransactionless(() -> getPollMessagesQuery(clientId, now).count());
     if (!includeAckedMessageInCount) {
       messageCount--;
     }
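The PollAckFlow hunk also shows a block-bodied Work collapsing all the way down to an expression lambda once the body is a single return. A tiny sketch of the two equivalent forms using a JDK functional interface (count() is a made-up stand-in for the query):

import java.util.function.IntSupplier;

public class ExpressionLambdaSketch {
  static int count() {
    return 3;
  }

  public static void main(String[] args) {
    // Block form, as the old anonymous Work<Integer> was written:
    IntSupplier block = () -> {
      return count();
    };
    // Expression form, as the new single-line call site reads:
    IntSupplier expression = () -> count();
    System.out.println(block.getAsInt() == expression.getAsInt()); // true
  }
}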
@@ -15,7 +15,6 @@
 package google.registry.mapreduce.inputs;

 import static com.google.common.collect.ImmutableSet.toImmutableSet;
-import static google.registry.model.EntityClasses.CLASS_TO_KIND_FUNCTION;
 import static google.registry.model.ofy.ObjectifyService.ofy;

 import com.google.appengine.api.datastore.Cursor;
@@ -138,6 +137,6 @@ abstract class EppResourceBaseReader<T> extends InputReader<T> {
     // Ignore EppResource when finding kinds, since it doesn't have one and doesn't imply filtering.
     return resourceClasses.contains(EppResource.class)
         ? ImmutableSet.of()
-        : resourceClasses.stream().map(CLASS_TO_KIND_FUNCTION).collect(toImmutableSet());
+        : resourceClasses.stream().map(Key::getKind).collect(toImmutableSet());
   }
 }
@@ -14,9 +14,7 @@

 package google.registry.model;

-import com.google.common.base.Function;
 import com.google.common.collect.ImmutableSet;
-import com.googlecode.objectify.Key;
 import google.registry.model.billing.BillingEvent;
 import google.registry.model.billing.RegistrarBillingEntry;
 import google.registry.model.billing.RegistrarCredit;
@@ -113,15 +111,5 @@ public final class EntityClasses {
           SignedMarkRevocationList.class,
           TmchCrl.class);

-  /**
-   * Function that converts an Objectify-registered class to its Datastore kind name.
-   *
-   * <p>Note that this mapping is not one-to-one, since polymorphic subclasses of an entity all have
-   * the same Datastore kind. (In theory, two distinct top-level entities could also map to the same
-   * kind since it's just {@code class.getSimpleName()}, but we test against that.)
-   */
-  public static final Function<Class<? extends ImmutableObject>, String> CLASS_TO_KIND_FUNCTION =
-      (Class<? extends ImmutableObject> clazz) -> Key.getKind(clazz);
-
   private EntityClasses() {}
 }
@ -15,7 +15,7 @@
|
|||
package google.registry.model;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.common.collect.Iterables.transform;
|
||||
import static com.google.common.collect.ImmutableSet.toImmutableSet;
|
||||
import static google.registry.model.ofy.ObjectifyService.ofy;
|
||||
import static google.registry.util.DateTimeUtils.isAtOrAfter;
|
||||
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
|
||||
|
@ -160,11 +160,15 @@ public final class EppResourceUtils {
|
|||
*/
|
||||
public static <T extends EppResource> Iterable<T> queryNotDeleted(
|
||||
Class<T> clazz, DateTime now, String filterDefinition, Object filterValue) {
|
||||
return transform(
|
||||
ofy().load().type(clazz)
|
||||
.filter(filterDefinition, filterValue)
|
||||
.filter("deletionTime >", now.toDate()),
|
||||
EppResourceUtils.transformAtTime(now));
|
||||
return ofy()
|
||||
.load()
|
||||
.type(clazz)
|
||||
.filter(filterDefinition, filterValue)
|
||||
.filter("deletionTime >", now.toDate())
|
||||
.list()
|
||||
.stream()
|
||||
.map(EppResourceUtils.transformAtTime(now))
|
||||
.collect(toImmutableSet());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -262,15 +266,13 @@ public final class EppResourceUtils {
|
|||
(isAtOrAfter(timestamp, resource.getUpdateAutoTimestamp().getTimestamp()))
|
||||
? new ResultNow<>(resource)
|
||||
: loadMostRecentRevisionAtTime(resource, timestamp);
|
||||
return new Result<T>() {
|
||||
@Override
|
||||
public T now() {
|
||||
T loadedResource = loadResult.now();
|
||||
return loadedResource == null ? null
|
||||
: (isActive(loadedResource, timestamp)
|
||||
? cloneProjectedAtTime(loadedResource, timestamp)
|
||||
: null);
|
||||
}};
|
||||
return () -> {
|
||||
T loadedResource = loadResult.now();
|
||||
return (loadedResource == null) ? null
|
||||
: (isActive(loadedResource, timestamp)
|
||||
? cloneProjectedAtTime(loadedResource, timestamp)
|
||||
: null);
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -290,19 +292,16 @@ public final class EppResourceUtils {
|
|||
}
|
||||
final Result<CommitLogMutation> mutationResult =
|
||||
ofy().load().key(CommitLogMutation.createKey(revision, resourceKey));
|
||||
return new Result<T>() {
|
||||
@Override
|
||||
public T now() {
|
||||
CommitLogMutation mutation = mutationResult.now();
|
||||
if (mutation != null) {
|
||||
return ofy().load().fromEntity(mutation.getEntity());
|
||||
}
|
||||
logger.severefmt(
|
||||
"Couldn't load mutation for revision at %s for %s, falling back to resource."
|
||||
+ " Revision: %s",
|
||||
timestamp, resourceKey, revision);
|
||||
return resource;
|
||||
return () -> {
|
||||
CommitLogMutation mutation = mutationResult.now();
|
||||
if (mutation != null) {
|
||||
return ofy().load().fromEntity(mutation.getEntity());
|
||||
}
|
||||
logger.severefmt(
|
||||
"Couldn't load mutation for revision at %s for %s, falling back to resource."
|
||||
+ " Revision: %s",
|
||||
timestamp, resourceKey, revision);
|
||||
return resource;
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,6 @@ import com.google.common.base.MoreObjects;
|
|||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.model.EppResource;
|
||||
import google.registry.model.ImmutableObject;
|
||||
import google.registry.model.contact.ContactResource;
|
||||
|
@ -447,12 +446,8 @@ public class DomainCommand {
|
|||
private static <T extends EppResource> ImmutableMap<String, Key<T>> loadByForeignKey(
|
||||
final Set<String> foreignKeys, final Class<T> clazz, final DateTime now)
|
||||
throws InvalidReferencesException {
|
||||
Map<String, ForeignKeyIndex<T>> fkis = ofy().doTransactionless(
|
||||
new Work<Map<String, ForeignKeyIndex<T>>>() {
|
||||
@Override
|
||||
public Map<String, ForeignKeyIndex<T>> run() {
|
||||
return ForeignKeyIndex.load(clazz, foreignKeys, now);
|
||||
}});
|
||||
Map<String, ForeignKeyIndex<T>> fkis =
|
||||
ofy().doTransactionless(() -> ForeignKeyIndex.load(clazz, foreignKeys, now));
|
||||
if (!fkis.keySet().equals(foreignKeys)) {
|
||||
throw new InvalidReferencesException(
|
||||
clazz, ImmutableSet.copyOf(difference(foreignKeys, fkis.keySet())));
|
||||
|
|
|
@ -21,7 +21,6 @@ import static google.registry.util.CollectionUtils.isNullOrEmpty;
|
|||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
import google.registry.model.BackupGroupRoot;
|
||||
|
@ -100,17 +99,15 @@ public class DomainApplicationIndex extends BackupGroupRoot {
|
|||
return ImmutableSet.of();
|
||||
}
|
||||
// Perform eventually consistent query, to avoid overenlisting cross entity groups
|
||||
return ofy().doTransactionless(new Work<ImmutableSet<DomainApplication>>() {
|
||||
@Override
|
||||
public ImmutableSet<DomainApplication> run() {
|
||||
ImmutableSet.Builder<DomainApplication> apps = new ImmutableSet.Builder<>();
|
||||
for (DomainApplication app : ofy().load().keys(index.getKeys()).values()) {
|
||||
if (app.getDeletionTime().isAfter(now)) {
|
||||
apps.add(app);
|
||||
}
|
||||
}
|
||||
return apps.build();
|
||||
}});
|
||||
return ofy().doTransactionless(() -> {
|
||||
ImmutableSet.Builder<DomainApplication> apps = new ImmutableSet.Builder<>();
|
||||
for (DomainApplication app : ofy().load().keys(index.getKeys()).values()) {
|
||||
if (app.getDeletionTime().isAfter(now)) {
|
||||
apps.add(app);
|
||||
}
|
||||
}
|
||||
return apps.build();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -274,26 +274,24 @@ public class Ofy {
|
|||
* its own retryable read-only transaction.
|
||||
*/
|
||||
private <R> Boolean checkIfAlreadySucceeded(final CommitLoggedWork<R> work) {
|
||||
return work.hasRun() && transactNewReadOnly(new Work<Boolean>() {
|
||||
@Override
|
||||
public Boolean run() {
|
||||
CommitLogManifest manifest = work.getManifest();
|
||||
if (manifest == null) {
|
||||
// Work ran but no commit log was created. This might mean that the transaction did not
|
||||
// write anything to Datastore. We can safely retry because it only reads. (Although the
|
||||
// transaction might have written a task to a queue, we consider that safe to retry too
|
||||
// since we generally assume that tasks might be doubly executed.) Alternatively it
|
||||
// might mean that the transaction wrote to Datastore but turned off commit logs by
|
||||
// exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we
|
||||
// have no hard proof that retrying is safe, we use these methods judiciously and it is
|
||||
// reasonable to assume that if the transaction really did succeed that the retry will
|
||||
// either be idempotent or will fail with a non-transient error.
|
||||
return false;
|
||||
}
|
||||
return Objects.equals(
|
||||
union(work.getMutations(), manifest),
|
||||
ImmutableSet.copyOf(load().ancestor(manifest)));
|
||||
}});
|
||||
return work.hasRun() && transactNewReadOnly(() -> {
|
||||
CommitLogManifest manifest = work.getManifest();
|
||||
if (manifest == null) {
|
||||
// Work ran but no commit log was created. This might mean that the transaction did not
|
||||
// write anything to Datastore. We can safely retry because it only reads. (Although the
|
||||
// transaction might have written a task to a queue, we consider that safe to retry too
|
||||
// since we generally assume that tasks might be doubly executed.) Alternatively it
|
||||
// might mean that the transaction wrote to Datastore but turned off commit logs by
|
||||
// exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we
|
||||
// have no hard proof that retrying is safe, we use these methods judiciously and it is
|
||||
// reasonable to assume that if the transaction really did succeed that the retry will
|
||||
// either be idempotent or will fail with a non-transient error.
|
||||
return false;
|
||||
}
|
||||
return Objects.equals(
|
||||
union(work.getMutations(), manifest),
|
||||
ImmutableSet.copyOf(load().ancestor(manifest)));
|
||||
});
|
||||
}
|
||||
|
||||
/** A read-only transaction is useful to get strongly consistent reads at a shared timestamp. */
|
||||
|
|
|
@@ -193,9 +193,7 @@ public class Registrar extends ImmutableObject implements Buildable, Jsonifiable
    * Compare two instances of {@link RegistrarContact} by their email addresses lexicographically.
    */
   private static final Comparator<RegistrarContact> CONTACT_EMAIL_COMPARATOR =
-      comparing(
-          (RegistrarContact arg) -> arg.getEmailAddress(),
-          (String leftProperty, String rightProperty) -> leftProperty.compareTo(rightProperty));
+      comparing(RegistrarContact::getEmailAddress, String::compareTo);

   /**
    * A caching {@link Supplier} of a clientId to {@link Registrar} map.
@@ -208,9 +206,7 @@ public class Registrar extends ImmutableObject implements Buildable, Jsonifiable
           () ->
               ofy()
                   .doTransactionless(
-                      () -> {
-                        return Maps.uniqueIndex(loadAll(), Registrar::getClientId);
-                      }));
+                      () -> Maps.uniqueIndex(loadAll(), Registrar::getClientId)));

   @Parent
   Key<EntityGroupRoot> parent = getCrossTldKey();
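The Registrar comparator now uses Comparator.comparing with method references. A sketch of both the two-argument form used in the diff and the one-argument overload, which is equivalent here because String is already Comparable (the Contact class is a stand-in for RegistrarContact):

import static java.util.Comparator.comparing;

import java.util.Comparator;

public class ComparatorSketch {
  // Stand-in for RegistrarContact, used only for illustration.
  static class Contact {
    final String email;
    Contact(String email) { this.email = email; }
    String getEmailAddress() { return email; }
  }

  // Two-argument form, mirroring the diff: key extractor plus key comparator.
  static final Comparator<Contact> BY_EMAIL =
      comparing(Contact::getEmailAddress, String::compareTo);

  // Since String is Comparable, the one-argument overload behaves the same.
  static final Comparator<Contact> BY_EMAIL_SHORT = comparing(Contact::getEmailAddress);

  public static void main(String[] args) {
    System.out.println(BY_EMAIL.compare(new Contact("a@x"), new Contact("b@x")) < 0); // true
    System.out.println(BY_EMAIL_SHORT.compare(new Contact("a@x"), new Contact("b@x")) < 0); // true
  }
}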
@ -32,7 +32,6 @@ import com.google.common.collect.ImmutableMap;
|
|||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.google.common.net.InternetDomainName;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.model.registry.Registry.TldType;
|
||||
import java.util.Optional;
|
||||
|
||||
|
@ -55,17 +54,14 @@ public final class Registries {
|
|||
() ->
|
||||
ofy()
|
||||
.doTransactionless(
|
||||
new Work<ImmutableMap<String, TldType>>() {
|
||||
@Override
|
||||
public ImmutableMap<String, TldType> run() {
|
||||
ImmutableMap.Builder<String, TldType> builder =
|
||||
new ImmutableMap.Builder<>();
|
||||
for (Registry registry :
|
||||
ofy().load().type(Registry.class).ancestor(getCrossTldKey())) {
|
||||
builder.put(registry.getTldStr(), registry.getTldType());
|
||||
}
|
||||
return builder.build();
|
||||
() -> {
|
||||
ImmutableMap.Builder<String, TldType> builder =
|
||||
new ImmutableMap.Builder<>();
|
||||
for (Registry registry :
|
||||
ofy().load().type(Registry.class).ancestor(getCrossTldKey())) {
|
||||
builder.put(registry.getTldStr(), registry.getTldType());
|
||||
}
|
||||
return builder.build();
|
||||
}));
|
||||
}
|
||||
|
||||
|
|
|
@ -43,7 +43,6 @@ import com.google.common.collect.Ordering;
|
|||
import com.google.common.collect.Range;
|
||||
import com.google.common.net.InternetDomainName;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import com.googlecode.objectify.annotation.Embed;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
|
@ -245,15 +244,10 @@ public class Registry extends ImmutableObject implements Buildable {
|
|||
return Optional.ofNullable(
|
||||
ofy()
|
||||
.doTransactionless(
|
||||
new Work<Registry>() {
|
||||
@Override
|
||||
public Registry run() {
|
||||
return ofy()
|
||||
.load()
|
||||
.key(Key.create(getCrossTldKey(), Registry.class, tld))
|
||||
.now();
|
||||
}
|
||||
}));
|
||||
() -> ofy()
|
||||
.load()
|
||||
.key(Key.create(getCrossTldKey(), Registry.class, tld))
|
||||
.now()));
|
||||
}
|
||||
});
|
||||
|
||||
|
|
|
@ -37,7 +37,6 @@ import com.google.common.collect.ImmutableSet;
|
|||
import com.google.common.collect.Streams;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.VoidWork;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.model.registry.Registry;
|
||||
import google.registry.model.registry.label.DomainLabelMetrics.PremiumListCheckOutcome;
|
||||
import google.registry.model.registry.label.PremiumList.PremiumListEntry;
|
||||
|
@ -162,28 +161,26 @@ public final class PremiumListUtils {
|
|||
}
|
||||
|
||||
// Save the new PremiumList and revision itself.
|
||||
PremiumList updated = ofy().transactNew(new Work<PremiumList>() {
|
||||
@Override
|
||||
public PremiumList run() {
|
||||
DateTime now = ofy().getTransactionTime();
|
||||
// Assert that the premium list hasn't been changed since we started this process.
|
||||
PremiumList existing = ofy().load()
|
||||
.type(PremiumList.class)
|
||||
.parent(getCrossTldKey())
|
||||
.id(premiumList.getName())
|
||||
.now();
|
||||
checkState(
|
||||
Objects.equals(existing, oldPremiumList.orElse(null)),
|
||||
"PremiumList was concurrently edited");
|
||||
PremiumList newList = premiumList.asBuilder()
|
||||
.setLastUpdateTime(now)
|
||||
.setCreationTime(
|
||||
oldPremiumList.isPresent() ? oldPremiumList.get().creationTime : now)
|
||||
.setRevision(newRevisionKey)
|
||||
.build();
|
||||
ofy().save().entities(newList, newRevision);
|
||||
return newList;
|
||||
}});
|
||||
PremiumList updated = ofy().transactNew(() -> {
|
||||
DateTime now = ofy().getTransactionTime();
|
||||
// Assert that the premium list hasn't been changed since we started this process.
|
||||
PremiumList existing = ofy().load()
|
||||
.type(PremiumList.class)
|
||||
.parent(getCrossTldKey())
|
||||
.id(premiumList.getName())
|
||||
.now();
|
||||
checkState(
|
||||
Objects.equals(existing, oldPremiumList.orElse(null)),
|
||||
"PremiumList was concurrently edited");
|
||||
PremiumList newList = premiumList.asBuilder()
|
||||
.setLastUpdateTime(now)
|
||||
.setCreationTime(
|
||||
oldPremiumList.isPresent() ? oldPremiumList.get().creationTime : now)
|
||||
.setRevision(newRevisionKey)
|
||||
.build();
|
||||
ofy().save().entities(newList, newRevision);
|
||||
return newList;
|
||||
});
|
||||
// Update the cache.
|
||||
cachePremiumLists.put(premiumList.getName(), updated);
|
||||
// Delete the entities under the old PremiumList.
|
||||
|
|
|
@ -22,7 +22,6 @@ import com.google.auto.value.AutoValue;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Strings;
|
||||
import com.googlecode.objectify.VoidWork;
|
||||
import com.googlecode.objectify.Work;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
import google.registry.model.ImmutableObject;
|
||||
|
@ -171,39 +170,37 @@ public class Lock extends ImmutableObject {
|
|||
// It's important to use transactNew rather than transact, because a Lock can be used to control
|
||||
// access to resources like GCS that can't be transactionally rolled back. Therefore, the lock
|
||||
// must be definitively acquired before it is used, even when called inside another transaction.
|
||||
AcquireResult acquireResult = ofy().transactNew(new Work<AcquireResult>() {
|
||||
@Override
|
||||
public AcquireResult run() {
|
||||
DateTime now = ofy().getTransactionTime();
|
||||
AcquireResult acquireResult = ofy().transactNew(() -> {
|
||||
DateTime now = ofy().getTransactionTime();
|
||||
|
||||
// Checking if an unexpired lock still exists - if so, the lock can't be acquired.
|
||||
Lock lock = ofy().load().type(Lock.class).id(lockId).now();
|
||||
if (lock != null) {
|
||||
logger.infofmt(
|
||||
"Loaded existing lock: %s for request: %s", lock.lockId, lock.requestLogId);
|
||||
}
|
||||
LockState lockState;
|
||||
if (lock == null) {
|
||||
lockState = LockState.FREE;
|
||||
} else if (isAtOrAfter(now, lock.expirationTime)) {
|
||||
lockState = LockState.TIMED_OUT;
|
||||
} else if (!requestStatusChecker.isRunning(lock.requestLogId)) {
|
||||
lockState = LockState.OWNER_DIED;
|
||||
} else {
|
||||
lockState = LockState.IN_USE;
|
||||
return AcquireResult.create(now, lock, null, lockState);
|
||||
}
|
||||
// Checking if an unexpired lock still exists - if so, the lock can't be acquired.
|
||||
Lock lock = ofy().load().type(Lock.class).id(lockId).now();
|
||||
if (lock != null) {
|
||||
logger.infofmt(
|
||||
"Loaded existing lock: %s for request: %s", lock.lockId, lock.requestLogId);
|
||||
}
|
||||
LockState lockState;
|
||||
if (lock == null) {
|
||||
lockState = LockState.FREE;
|
||||
} else if (isAtOrAfter(now, lock.expirationTime)) {
|
||||
lockState = LockState.TIMED_OUT;
|
||||
} else if (!requestStatusChecker.isRunning(lock.requestLogId)) {
|
||||
lockState = LockState.OWNER_DIED;
|
||||
} else {
|
||||
lockState = LockState.IN_USE;
|
||||
return AcquireResult.create(now, lock, null, lockState);
|
||||
}
|
||||
|
||||
Lock newLock = create(
|
||||
resourceName,
|
||||
tld,
|
||||
requestStatusChecker.getLogId(),
|
||||
now.plus(leaseLength));
|
||||
// Locks are not parented under an EntityGroupRoot (so as to avoid write contention) and
|
||||
// don't need to be backed up.
|
||||
ofy().saveWithoutBackup().entity(newLock);
|
||||
return AcquireResult.create(now, lock, newLock, lockState);
|
||||
}});
|
||||
Lock newLock = create(
|
||||
resourceName,
|
||||
tld,
|
||||
requestStatusChecker.getLogId(),
|
||||
now.plus(leaseLength));
|
||||
// Locks are not parented under an EntityGroupRoot (so as to avoid write contention) and
|
||||
// don't need to be backed up.
|
||||
ofy().saveWithoutBackup().entity(newLock);
|
||||
return AcquireResult.create(now, lock, newLock, lockState);
|
||||
});
|
||||
|
||||
logAcquireResult(acquireResult);
|
||||
lockMetrics.record(resourceName, tld, acquireResult.lockState());
|
||||
|
|
|
@@ -21,7 +21,6 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.primitives.Longs;
-import com.googlecode.objectify.Work;
 import com.googlecode.objectify.annotation.Entity;
 import com.googlecode.objectify.annotation.Unindex;
 import google.registry.model.annotations.NotBackedUp;
@@ -55,18 +54,16 @@ public class ServerSecret extends CrossTldSingleton {
             return secret;
           }
           // Slow path - transactionally create a new ServerSecret (once per app setup).
-          return ofy().transact(new Work<ServerSecret>() {
-            @Override
-            public ServerSecret run() {
-              // Check again for an existing secret within the transaction to avoid races.
-              ServerSecret secret = ofy().load().entity(new ServerSecret()).now();
-              if (secret == null) {
-                UUID uuid = UUID.randomUUID();
-                secret = create(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
-                ofy().saveWithoutBackup().entity(secret).now();
-              }
-              return secret;
-            }});
+          return ofy().transact(() -> {
+            // Check again for an existing secret within the transaction to avoid races.
+            ServerSecret secret1 = ofy().load().entity(new ServerSecret()).now();
+            if (secret1 == null) {
+              UUID uuid = UUID.randomUUID();
+              secret1 = create(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
+              ofy().saveWithoutBackup().entity(secret1).now();
+            }
+            return secret1;
+          });
         }
       });

@ -30,7 +30,6 @@ import com.google.common.collect.FluentIterable;
|
|||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import com.googlecode.objectify.annotation.EmbedMap;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
|
@ -95,31 +94,28 @@ public class SignedMarkRevocationList extends ImmutableObject {
|
|||
() ->
|
||||
ofy()
|
||||
.transactNewReadOnly(
|
||||
new Work<SignedMarkRevocationList>() {
|
||||
@Override
|
||||
public SignedMarkRevocationList run() {
|
||||
Iterable<SignedMarkRevocationList> shards =
|
||||
ofy()
|
||||
.load()
|
||||
.type(SignedMarkRevocationList.class)
|
||||
.ancestor(getCrossTldKey());
|
||||
DateTime creationTime =
|
||||
isEmpty(shards)
|
||||
? START_OF_TIME
|
||||
: checkNotNull(
|
||||
Iterables.get(shards, 0).creationTime, "creationTime");
|
||||
ImmutableMap.Builder<String, DateTime> revokes =
|
||||
new ImmutableMap.Builder<>();
|
||||
for (SignedMarkRevocationList shard : shards) {
|
||||
revokes.putAll(shard.revokes);
|
||||
checkState(
|
||||
creationTime.equals(shard.creationTime),
|
||||
"Inconsistent creation times: %s vs. %s",
|
||||
creationTime,
|
||||
shard.creationTime);
|
||||
}
|
||||
return create(creationTime, revokes.build());
|
||||
() -> {
|
||||
Iterable<SignedMarkRevocationList> shards =
|
||||
ofy()
|
||||
.load()
|
||||
.type(SignedMarkRevocationList.class)
|
||||
.ancestor(getCrossTldKey());
|
||||
DateTime creationTime =
|
||||
isEmpty(shards)
|
||||
? START_OF_TIME
|
||||
: checkNotNull(
|
||||
Iterables.get(shards, 0).creationTime, "creationTime");
|
||||
ImmutableMap.Builder<String, DateTime> revokes =
|
||||
new ImmutableMap.Builder<>();
|
||||
for (SignedMarkRevocationList shard : shards) {
|
||||
revokes.putAll(shard.revokes);
|
||||
checkState(
|
||||
creationTime.equals(shard.creationTime),
|
||||
"Inconsistent creation times: %s vs. %s",
|
||||
creationTime,
|
||||
shard.creationTime);
|
||||
}
|
||||
return create(creationTime, revokes.build());
|
||||
}));
|
||||
|
||||
/** Return a single logical instance that combines all Datastore shards. */
|
||||
|
|
|
@ -26,7 +26,6 @@ import com.google.common.annotations.VisibleForTesting;
|
|||
import com.google.common.base.Supplier;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import com.googlecode.objectify.annotation.EmbedMap;
|
||||
import com.googlecode.objectify.annotation.Entity;
|
||||
import com.googlecode.objectify.annotation.Id;
|
||||
|
@ -112,15 +111,12 @@ public class ClaimsListShard extends ImmutableObject {
|
|||
(final Key<ClaimsListShard> key) ->
|
||||
ofy()
|
||||
.transactNewReadOnly(
|
||||
new Work<ClaimsListShard>() {
|
||||
@Override
|
||||
public ClaimsListShard run() {
|
||||
ClaimsListShard claimsListShard = ofy().load().key(key).now();
|
||||
checkState(
|
||||
claimsListShard != null,
|
||||
"Key not found when loading claims list shards.");
|
||||
return claimsListShard;
|
||||
}
|
||||
() -> {
|
||||
ClaimsListShard claimsListShard = ofy().load().key(key).now();
|
||||
checkState(
|
||||
claimsListShard != null,
|
||||
"Key not found when loading claims list shards.");
|
||||
return claimsListShard;
|
||||
}));
|
||||
|
||||
// Combine the shards together and return the concatenated ClaimsList.
|
||||
|
|
|
@@ -460,7 +460,7 @@ public class RdapDomainSearchAction extends RdapActionBase {
       }
       Streams.stream(query)
           .filter(domain -> isAuthorized(domain, now))
-          .forEach(domain -> domainSetBuilder.add(domain));
+          .forEach(domainSetBuilder::add);
     }
     List<DomainResource> domains = domainSetBuilder.build().asList();
     metricInformationBuilder.setNumHostsRetrieved(numHostKeysSearched);
@ -19,7 +19,6 @@ import static google.registry.model.ofy.ObjectifyService.ofy;
|
|||
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
|
||||
|
||||
import com.google.common.collect.ImmutableSetMultimap;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.model.common.Cursor;
|
||||
import google.registry.model.common.Cursor.CursorType;
|
||||
|
@ -107,16 +106,16 @@ public final class PendingDepositChecker {
|
|||
final Registry registry,
|
||||
final CursorType cursorType,
|
||||
final DateTime initialValue) {
|
||||
return ofy().transact(new Work<DateTime>() {
|
||||
@Override
|
||||
public DateTime run() {
|
||||
Cursor cursor = ofy().load().key(Cursor.createKey(cursorType, registry)).now();
|
||||
if (cursor != null) {
|
||||
return cursor.getCursorTime();
|
||||
}
|
||||
ofy().save().entity(Cursor.create(cursorType, initialValue, registry));
|
||||
return initialValue;
|
||||
}});
|
||||
return ofy()
|
||||
.transact(
|
||||
() -> {
|
||||
Cursor cursor = ofy().load().key(Cursor.createKey(cursorType, registry)).now();
|
||||
if (cursor != null) {
|
||||
return cursor.getCursorTime();
|
||||
}
|
||||
ofy().save().entity(Cursor.create(cursorType, initialValue, registry));
|
||||
return initialValue;
|
||||
});
|
||||
}
|
||||
|
||||
private static DateTime advanceToDayOfWeek(DateTime date, int dayOfWeek) {
|
||||
|
|
|
@ -26,7 +26,6 @@ import com.google.appengine.tools.mapreduce.Mapper;
|
|||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.net.InternetDomainName;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.config.RegistryConfig.Config;
|
||||
import google.registry.flows.host.HostFlowUtils;
|
||||
import google.registry.mapreduce.MapreduceRunner;
|
||||
|
@ -110,37 +109,34 @@ public class RdeHostLinkAction implements Runnable {
|
|||
try {
|
||||
final InternetDomainName hostName = InternetDomainName.from(xjcHost.getName());
|
||||
|
||||
HostLinkResult hostLinkResult = ofy().transact(new Work<HostLinkResult>() {
|
||||
@Override
|
||||
public HostLinkResult run() {
|
||||
Optional<DomainResource> superordinateDomain =
|
||||
lookupSuperordinateDomain(hostName, ofy().getTransactionTime());
|
||||
// if superordinateDomain is absent, this is an out of zone host and can't be linked.
|
||||
// absent is only returned for out of zone hosts, and an exception is thrown for in
|
||||
// zone hosts with no superordinate domain.
|
||||
if (!superordinateDomain.isPresent()) {
|
||||
return HostLinkResult.HOST_OUT_OF_ZONE;
|
||||
}
|
||||
if (superordinateDomain.get().getStatusValues().contains(StatusValue.PENDING_DELETE)) {
|
||||
return HostLinkResult.SUPERORDINATE_DOMAIN_IN_PENDING_DELETE;
|
||||
}
|
||||
Key<DomainResource> superordinateDomainKey = Key.create(superordinateDomain.get());
|
||||
// link host to superordinate domain and set time of last superordinate change to
|
||||
// the time of the import
|
||||
HostResource host =
|
||||
ofy().load().now(Key.create(HostResource.class, xjcHost.getRoid()));
|
||||
if (host == null) {
|
||||
return HostLinkResult.HOST_NOT_FOUND;
|
||||
}
|
||||
// link domain to subordinate host
|
||||
ofy().save().<EppResource>entities(
|
||||
host.asBuilder().setSuperordinateDomain(superordinateDomainKey)
|
||||
.setLastSuperordinateChange(ofy().getTransactionTime())
|
||||
.build(),
|
||||
superordinateDomain.get().asBuilder()
|
||||
.addSubordinateHost(host.getFullyQualifiedHostName()).build());
|
||||
return HostLinkResult.HOST_LINKED;
|
||||
HostLinkResult hostLinkResult = ofy().transact(() -> {
|
||||
Optional<DomainResource> superordinateDomain =
|
||||
lookupSuperordinateDomain(hostName, ofy().getTransactionTime());
|
||||
// if superordinateDomain is absent, this is an out of zone host and can't be linked.
|
||||
// absent is only returned for out of zone hosts, and an exception is thrown for in
|
||||
// zone hosts with no superordinate domain.
|
||||
if (!superordinateDomain.isPresent()) {
|
||||
return HostLinkResult.HOST_OUT_OF_ZONE;
|
||||
}
|
||||
if (superordinateDomain.get().getStatusValues().contains(StatusValue.PENDING_DELETE)) {
|
||||
return HostLinkResult.SUPERORDINATE_DOMAIN_IN_PENDING_DELETE;
|
||||
}
|
||||
Key<DomainResource> superordinateDomainKey = Key.create(superordinateDomain.get());
|
||||
// link host to superordinate domain and set time of last superordinate change to
|
||||
// the time of the import
|
||||
HostResource host =
|
||||
ofy().load().now(Key.create(HostResource.class, xjcHost.getRoid()));
|
||||
if (host == null) {
|
||||
return HostLinkResult.HOST_NOT_FOUND;
|
||||
}
|
||||
// link domain to subordinate host
|
||||
ofy().save().<EppResource>entities(
|
||||
host.asBuilder().setSuperordinateDomain(superordinateDomainKey)
|
||||
.setLastSuperordinateChange(ofy().getTransactionTime())
|
||||
.build(),
|
||||
superordinateDomain.get().asBuilder()
|
||||
.addSubordinateHost(host.getFullyQualifiedHostName()).build());
|
||||
return HostLinkResult.HOST_LINKED;
|
||||
});
|
||||
// increment counter and log appropriately based on result of transaction
|
||||
switch (hostLinkResult) {
|
||||
|
|
|
@@ -196,7 +196,7 @@ public class IcannReportingStager {
   private String constructTotalRow(List<Integer> totals) {
     StringBuilder rowString = new StringBuilder("Totals,,");
     rowString.append(
-        totals.stream().map((Integer i) -> i.toString()).collect(Collectors.joining(",")));
+        totals.stream().map(Object::toString).collect(Collectors.joining(",")));
     return rowString.toString();
   }

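The IcannReportingStager hunk replaces the explicitly typed (Integer i) -> i.toString() lambda with Object::toString. A sketch of the same joining pipeline (the totals values are invented):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class JoiningSketch {
  public static void main(String[] args) {
    List<Integer> totals = Arrays.asList(12, 0, 7);
    // Object::toString replaces the explicitly typed (Integer i) -> i.toString() lambda.
    String row =
        "Totals,," + totals.stream().map(Object::toString).collect(Collectors.joining(","));
    System.out.println(row); // Totals,,12,0,7
  }
}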
@ -34,7 +34,6 @@ import com.google.common.collect.ImmutableMap;
|
|||
import com.google.template.soy.data.SoyMapData;
|
||||
import com.googlecode.objectify.Key;
|
||||
import com.googlecode.objectify.VoidWork;
|
||||
import com.googlecode.objectify.Work;
|
||||
import google.registry.flows.EppException;
|
||||
import google.registry.model.domain.DesignatedContact;
|
||||
import google.registry.model.domain.DomainApplication;
|
||||
|
@ -71,27 +70,24 @@ final class AllocateDomainCommand extends MutatingEppToolCommand {
|
|||
.append(
|
||||
ofy()
|
||||
.transactNewReadOnly(
|
||||
new Work<String>() {
|
||||
@Override
|
||||
public String run() {
|
||||
String failureMessage =
|
||||
ofy()
|
||||
.load()
|
||||
.keys(applicationKeys)
|
||||
.values()
|
||||
.stream()
|
||||
.map(
|
||||
application ->
|
||||
application.getApplicationStatus()
|
||||
== ApplicationStatus.ALLOCATED
|
||||
? null
|
||||
: application.getFullyQualifiedDomainName())
|
||||
.filter(Objects::nonNull)
|
||||
.collect(joining("\n"));
|
||||
return failureMessage.isEmpty()
|
||||
? "ALL SUCCEEDED"
|
||||
: addHeader("FAILURES", failureMessage);
|
||||
}
|
||||
() -> {
|
||||
String failureMessage =
|
||||
ofy()
|
||||
.load()
|
||||
.keys(applicationKeys)
|
||||
.values()
|
||||
.stream()
|
||||
.map(
|
||||
application ->
|
||||
application.getApplicationStatus()
|
||||
== ApplicationStatus.ALLOCATED
|
||||
? null
|
||||
: application.getFullyQualifiedDomainName())
|
||||
.filter(Objects::nonNull)
|
||||
.collect(joining("\n"));
|
||||
return failureMessage.isEmpty()
|
||||
? "ALL SUCCEEDED"
|
||||
: addHeader("FAILURES", failureMessage);
|
||||
}))
|
||||
.toString();
|
||||
}
|
||||
|
|
|
@@ -27,7 +27,6 @@ import com.google.api.client.http.HttpHeaders;
 import com.google.api.client.http.HttpRequest;
 import com.google.api.client.http.HttpRequestFactory;
 import com.google.api.client.http.HttpResponse;
-import com.google.api.client.http.HttpUnsuccessfulResponseHandler;
 import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -98,20 +97,15 @@ class AppEngineConnection implements Connection {
     request.setFollowRedirects(false);
     request.setThrowExceptionOnExecuteError(false);
     request.setUnsuccessfulResponseHandler(
-        new HttpUnsuccessfulResponseHandler() {
-          @Override
-          public boolean handleResponse(
-              HttpRequest request, HttpResponse response, boolean supportsRetry)
-              throws IOException {
-            String errorTitle = extractHtmlTitle(getErrorHtmlAsString(response));
-            throw new IOException(
-                String.format(
-                    "Error from %s: %d %s%s",
-                    request.getUrl().toString(),
-                    response.getStatusCode(),
-                    response.getStatusMessage(),
-                    (errorTitle == null ? "" : ": " + errorTitle)));
-          }
+        (request1, response, supportsRetry) -> {
+          String errorTitle = extractHtmlTitle(getErrorHtmlAsString(response));
+          throw new IOException(
+              String.format(
+                  "Error from %s: %d %s%s",
+                  request1.getUrl().toString(),
+                  response.getStatusCode(),
+                  response.getStatusMessage(),
+                  (errorTitle == null ? "" : ": " + errorTitle)));
         });
     HttpResponse response = null;
     try {
@@ -28,11 +28,11 @@ import com.google.common.base.Strings;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.net.InternetDomainName;
import com.googlecode.objectify.Work;
import google.registry.model.contact.ContactResource;
import google.registry.model.domain.DomainApplication;
import google.registry.tools.Command.RemoteApiCommand;
@@ -71,20 +71,17 @@ final class AuctionStatusCommand implements RemoteApiCommand {
fullyQualifiedDomainName);
return ofy()
.transactNewReadOnly(
new Work<Iterable<String>>() {
@Override
public Iterable<String> run() {
ImmutableList.Builder<DomainApplication> applications =
new ImmutableList.Builder<>();
for (String domain : domains) {
applications.addAll(
loadActiveApplicationsByDomainName(
domain, ofy().getTransactionTime()));
}
return Lists.transform(
ImmutableList.sortedCopyOf(ORDERING, applications.build()),
APPLICATION_FORMATTER);
() -> {
Builder<DomainApplication> applications =
new Builder<>();
for (String domain : domains) {
applications.addAll(
loadActiveApplicationsByDomainName(
domain, ofy().getTransactionTime()));
}
return Lists.transform(
ImmutableList.sortedCopyOf(ORDERING, applications.build()),
APPLICATION_FORMATTER);
});
}),
UTF_8);
@@ -15,9 +15,7 @@
package google.registry.tools;

import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.http.HttpRequest;
import com.google.api.client.http.HttpRequestFactory;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.javanet.NetHttpTransport;
import dagger.Binds;
import dagger.Module;
@@ -50,14 +48,9 @@ class DefaultRequestFactoryModule {
if (connectionFlags.getServer().getHost().equals("localhost")) {
return new NetHttpTransport()
.createRequestFactory(
new HttpRequestInitializer() {
@Override
public void initialize(HttpRequest request) {
request
.getHeaders()
.setCookie("dev_appserver_login=test@example.com:true:1858047912411");
}
});
request -> request
.getHeaders()
.setCookie("dev_appserver_login=test@example.com:true:1858047912411"));
} else {
return new NetHttpTransport().createRequestFactory(credentialProvider.get());
}
@@ -23,11 +23,9 @@ import static java.nio.charset.StandardCharsets.US_ASCII;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import google.registry.model.domain.DomainResource;
import google.registry.model.domain.secdns.DelegationSignerData;
import google.registry.model.host.HostResource;
import google.registry.tools.Command.RemoteApiCommand;
import google.registry.tools.params.PathParameter;
@@ -104,16 +102,12 @@ final class GenerateDnsReportCommand implements RemoteApiCommand {
.getDsData()
.stream()
.map(
new Function<DelegationSignerData, Map<String, ?>>() {
@Override
public Map<String, ?> apply(DelegationSignerData dsData) {
return ImmutableMap.of(
"keyTag", dsData.getKeyTag(),
"algorithm", dsData.getAlgorithm(),
"digestType", dsData.getDigestType(),
"digest", base16().encode(dsData.getDigest()));
}
})
dsData1 ->
ImmutableMap.of(
"keyTag", dsData1.getKeyTag(),
"algorithm", dsData1.getAlgorithm(),
"digestType", dsData1.getDigestType(),
"digest", base16().encode(dsData1.getDigest())))
.collect(toImmutableList());
ImmutableMap.Builder<String, Object> mapBuilder = new ImmutableMap.Builder<>();
mapBuilder.put("domain", domain.getFullyQualifiedDomainName());
@@ -20,7 +20,6 @@ import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.googlecode.objectify.Work;
import google.registry.model.billing.RegistrarCredit;
import google.registry.model.billing.RegistrarCreditBalance.BalanceMap;
import google.registry.model.registrar.Registrar;
@@ -52,22 +51,19 @@ final class ListCreditsCommand implements RemoteApiCommand {
private ImmutableList<String> createCreditStrings(final Registrar registrar) {
return ofy()
.transactNewReadOnly(
new Work<ImmutableList<String>>() {
@Override
public ImmutableList<String> run() {
ImmutableList.Builder<String> builder = new ImmutableList.Builder<>();
for (RegistrarCredit credit : RegistrarCredit.loadAllForRegistrar(registrar)) {
BalanceMap balanceMap = BalanceMap.createForCredit(credit);
Optional<Money> activeBalance =
balanceMap.getActiveBalanceAtTime(ofy().getTransactionTime());
// Unless showAll is true, only show credits with a positive active balance (which
// excludes just zero-balance credits since credit balances cannot be negative).
if (showAll || (activeBalance.isPresent() && activeBalance.get().isPositive())) {
builder.add(credit.getSummary() + "\n" + balanceMap);
}
() -> {
ImmutableList.Builder<String> builder = new ImmutableList.Builder<>();
for (RegistrarCredit credit : RegistrarCredit.loadAllForRegistrar(registrar)) {
BalanceMap balanceMap = BalanceMap.createForCredit(credit);
Optional<Money> activeBalance =
balanceMap.getActiveBalanceAtTime(ofy().getTransactionTime());
// Unless showAll is true, only show credits with a positive active balance (which
// excludes just zero-balance credits since credit balances cannot be negative).
if (showAll || (activeBalance.isPresent() && activeBalance.get().isPositive())) {
builder.add(credit.getSummary() + "\n" + balanceMap);
}
return builder.build();
}
return builder.build();
});
}
}
@@ -71,15 +71,12 @@ public final class SoyTemplateUtils {
return memoize(
() -> {
final ImmutableMap<String, String> renames = getCssRenames(cssMap, cssMapDebug);
return new SoyCssRenamingMap() {
@Override
public String get(String cssClassName) {
List<String> result = new ArrayList<>();
for (String part : CSS_CLASS_SPLITTER.split(cssClassName)) {
result.add(renames.getOrDefault(part, part));
}
return CSS_CLASS_JOINER.join(result);
return (cssClassName) -> {
List<String> result = new ArrayList<>();
for (String part : CSS_CLASS_SPLITTER.split(cssClassName)) {
result.add(renames.getOrDefault(part, part));
}
return CSS_CLASS_JOINER.join(result);
};
});
}
@@ -18,13 +18,13 @@ import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterables.toArray;

import com.google.common.base.Joiner;
import com.google.common.base.Predicates;
import com.google.common.collect.Streams;
import google.registry.config.RegistryConfig.Config;
import google.registry.util.FormattingLogger;
import google.registry.util.NonFinalForTesting;
import google.registry.util.SendEmailService;
import java.util.List;
import java.util.Objects;
import javax.inject.Inject;
import javax.mail.Message;
import javax.mail.internet.AddressException;
@@ -77,7 +77,7 @@ public class SendEmailUtils {
return null;
}
})
.filter(Predicates.notNull())
.filter(Objects::nonNull)
.collect(toImmutableList());
if (emails.isEmpty()) {
return false;
@@ -15,15 +15,11 @@
package google.registry.util;

import com.google.appengine.api.datastore.Key;
import com.google.common.base.Function;
import java.util.Optional;

/** Utility methods for working with the App Engine Datastore service. */
public class DatastoreServiceUtils {

/** Helper function that extracts the kind from a regular Datastore entity key. */
public static final Function<Key, String> KEY_TO_KIND_FUNCTION = Key::getKind;

/** Returns the name or id of a key, which may be a string or a long. */
public static Object getNameOrId(Key key) {
return Optional.<Object>ofNullable(key.getName()).orElse(key.getId());
@@ -32,7 +32,6 @@ import google.registry.whois.WhoisException.UncheckedWhoisException;
import google.registry.whois.WhoisMetrics.WhoisMetric;
import google.registry.whois.WhoisResponse.WhoisResponseResults;
import java.io.Reader;
import java.util.concurrent.Callable;
import javax.inject.Inject;
import org.joda.time.DateTime;

@@ -87,17 +86,14 @@ public class WhoisServer implements Runnable {
metricBuilder.setCommand(command);
WhoisResponseResults results =
retrier.callWithRetry(
new Callable<WhoisResponseResults>() {
@Override
public WhoisResponseResults call() {
WhoisResponseResults results;
try {
results = command.executeQuery(now).getResponse(PREFER_UNICODE, disclaimer);
} catch (WhoisException e) {
throw new UncheckedWhoisException(e);
}
return results;
() -> {
WhoisResponseResults results1;
try {
results1 = command.executeQuery(now).getResponse(PREFER_UNICODE, disclaimer);
} catch (WhoisException e) {
throw new UncheckedWhoisException(e);
}
return results1;
},
DatastoreTimeoutException.class,
DatastoreFailureException.class);
@@ -61,9 +61,7 @@ import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
import org.mockito.stubbing.Answer;

/** Test case for {@link CloudDnsWriter}. */
@RunWith(MockitoJUnitRunner.class)
@@ -119,44 +117,36 @@ public class CloudDnsWriterTest {
// Return records from our stub zone when a request to list the records is executed
when(listResourceRecordSetsRequest.execute())
.thenAnswer(
new Answer<ResourceRecordSetsListResponse>() {
@Override
public ResourceRecordSetsListResponse answer(InvocationOnMock invocationOnMock)
throws Throwable {
return new ResourceRecordSetsListResponse()
invocationOnMock ->
new ResourceRecordSetsListResponse()
.setRrsets(
stubZone
.stream()
.filter(
rs ->
rs != null && rs.getName().equals(recordNameCaptor.getValue()))
.collect(toImmutableList()));
}
});
.collect(toImmutableList())));

when(changes.create(anyString(), zoneNameCaptor.capture(), changeCaptor.capture()))
.thenReturn(createChangeRequest);
// Change our stub zone when a request to change the records is executed
when(createChangeRequest.execute())
.thenAnswer(
new Answer<Change>() {
@Override
public Change answer(InvocationOnMock invocationOnMock) throws IOException {
Change requestedChange = changeCaptor.getValue();
ImmutableSet<ResourceRecordSet> toDelete =
ImmutableSet.copyOf(requestedChange.getDeletions());
ImmutableSet<ResourceRecordSet> toAdd =
ImmutableSet.copyOf(requestedChange.getAdditions());
// Fail if the records to delete has records that aren't in the stub zone.
// This matches documented Google Cloud DNS behavior.
if (!Sets.difference(toDelete, stubZone).isEmpty()) {
throw new IOException();
}
stubZone =
Sets.union(Sets.difference(stubZone, toDelete).immutableCopy(), toAdd)
.immutableCopy();
return requestedChange;
invocationOnMock -> {
Change requestedChange = changeCaptor.getValue();
ImmutableSet<ResourceRecordSet> toDelete =
ImmutableSet.copyOf(requestedChange.getDeletions());
ImmutableSet<ResourceRecordSet> toAdd =
ImmutableSet.copyOf(requestedChange.getAdditions());
// Fail if the records to delete has records that aren't in the stub zone.
// This matches documented Google Cloud DNS behavior.
if (!Sets.difference(toDelete, stubZone).isEmpty()) {
throw new IOException();
}
stubZone =
Sets.union(Sets.difference(stubZone, toDelete).immutableCopy(), toAdd)
.immutableCopy();
return requestedChange;
});
}
@@ -23,8 +23,8 @@ import static google.registry.testing.DatastoreHelper.persistActiveContact;
import static google.registry.testing.DatastoreHelper.persistActiveHost;
import static google.registry.testing.DatastoreHelper.persistResource;
import static google.registry.testing.TestDataHelper.loadFileWithSubstitutions;
import static google.registry.util.DatastoreServiceUtils.KEY_TO_KIND_FUNCTION;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -331,22 +331,20 @@ public class DomainApplicationInfoFlowTest
int numPreviousReads = RequestCapturingAsyncDatastoreService.getReads().size();
doSuccessfulTest("domain_info_sunrise_response.xml", HostsState.HOSTS_EXIST);
// Get all of the keys loaded in the flow, with each distinct load() call as a list of keys.
int numReadsWithContactsOrHosts =
(int)
RequestCapturingAsyncDatastoreService.getReads()
.stream()
.skip(numPreviousReads)
.filter(
keys ->
keys.stream()
.map(KEY_TO_KIND_FUNCTION)
.anyMatch(
kind ->
ImmutableSet.of(
Key.getKind(ContactResource.class),
Key.getKind(HostResource.class))
.contains(kind)))
.count();
long numReadsWithContactsOrHosts =
RequestCapturingAsyncDatastoreService.getReads()
.stream()
.skip(numPreviousReads)
.filter(
keys ->
keys.stream()
.map(com.google.appengine.api.datastore.Key::getKind)
.anyMatch(
Predicates.in(
ImmutableSet.of(
Key.getKind(ContactResource.class),
Key.getKind(HostResource.class)))))
.count();
assertThat(numReadsWithContactsOrHosts).isEqualTo(1);
}
@@ -24,8 +24,8 @@ import static google.registry.testing.DatastoreHelper.persistActiveContact;
import static google.registry.testing.DatastoreHelper.persistActiveHost;
import static google.registry.testing.DatastoreHelper.persistResource;
import static google.registry.testing.TestDataHelper.loadFileWithSubstitutions;
import static google.registry.util.DatastoreServiceUtils.KEY_TO_KIND_FUNCTION;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
@@ -644,13 +644,12 @@ public class DomainInfoFlowTest extends ResourceFlowTestCase<DomainInfoFlow, Dom
.filter(
keys ->
keys.stream()
.map(KEY_TO_KIND_FUNCTION)
.map(com.google.appengine.api.datastore.Key::getKind)
.anyMatch(
kind ->
Predicates.in(
ImmutableSet.of(
Key.getKind(ContactResource.class),
Key.getKind(HostResource.class))
.contains(kind)))
Key.getKind(ContactResource.class),
Key.getKind(HostResource.class)))))
.count();
assertThat(numReadsWithContactsOrHosts).isEqualTo(1);
}
@@ -242,7 +242,7 @@ public class ComparatorKeyringTest {
when(secondKeyring.getRdeSigningKey()).thenReturn(keyPair);
Keyring comparatorKeyring = ComparatorKeyring.create(actualKeyring, secondKeyring);

assertThrows(KeyringException.class, () -> comparatorKeyring.getRdeSigningKey());
assertThrows(KeyringException.class, comparatorKeyring::getRdeSigningKey);

assertAboutLogs()
.that(testLogHandler)
@@ -278,7 +278,7 @@ public class ComparatorKeyringTest {
when(secondKeyring.getRdeSigningKey()).thenThrow(new KeyringException("message"));
Keyring comparatorKeyring = ComparatorKeyring.create(actualKeyring, secondKeyring);

assertThrows(KeyringException.class, () -> comparatorKeyring.getRdeSigningKey());
assertThrows(KeyringException.class, comparatorKeyring::getRdeSigningKey);

assertAboutLogs().that(testLogHandler).hasNoLogsAtLevel(Level.SEVERE);
}
@@ -17,11 +17,11 @@ package google.registry.model;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.truth.Truth.assertThat;
import static google.registry.model.EntityClasses.ALL_CLASSES;
import static google.registry.model.EntityClasses.CLASS_TO_KIND_FUNCTION;
import static google.registry.util.TypeUtils.hasAnnotation;
import static java.util.stream.Collectors.toSet;

import com.google.common.collect.Ordering;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.EntitySubclass;
import java.util.Set;
@@ -50,7 +50,7 @@ public class EntityClassesTest {
ALL_CLASSES
.stream()
.filter(hasAnnotation(Entity.class))
.map(CLASS_TO_KIND_FUNCTION)
.map(Key::getKind)
.collect(toImmutableSet()))
.named("base entity kinds")
.containsNoDuplicates();
@@ -62,13 +62,13 @@ public class EntityClassesTest {
ALL_CLASSES
.stream()
.filter(hasAnnotation(Entity.class))
.map(CLASS_TO_KIND_FUNCTION)
.map(Key::getKind)
.collect(toSet());
Set<String> entitySubclassKinds =
ALL_CLASSES
.stream()
.filter(hasAnnotation(EntitySubclass.class))
.map(CLASS_TO_KIND_FUNCTION)
.map(Key::getKind)
.collect(toSet());
assertThat(baseEntityKinds).named("base entity kinds").containsAllIn(entitySubclassKinds);
}
@@ -23,7 +23,6 @@ import static java.util.Arrays.asList;

import com.google.common.base.Joiner;
import com.google.common.io.ByteSource;
import com.googlecode.objectify.Work;
import google.registry.model.contact.ContactResource;
import google.registry.model.contact.PostalInfo;
import google.registry.model.eppcommon.StatusValue;
@@ -191,12 +190,7 @@ public class XjcToContactResourceConverterTest {
}

private static ContactResource convertContactInTransaction(final XjcRdeContact xjcContact) {
return ofy().transact(new Work<ContactResource>() {
@Override
public ContactResource run() {
return XjcToContactResourceConverter.convertContact(xjcContact);
}
});
return ofy().transact(() -> XjcToContactResourceConverter.convertContact(xjcContact));
}

private XjcRdeContact loadContactFromRdeXml() throws Exception {
@@ -33,7 +33,6 @@ import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteSource;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import google.registry.model.billing.BillingEvent;
import google.registry.model.billing.BillingEvent.Flag;
import google.registry.model.billing.BillingEvent.Reason;
@@ -413,18 +412,15 @@ public class XjcToDomainResourceConverterTest {
private static DomainResource convertDomainInTransaction(final XjcRdeDomain xjcDomain) {
return ofy()
.transact(
new Work<DomainResource>() {
@Override
public DomainResource run() {
HistoryEntry historyEntry = createHistoryEntryForDomainImport(xjcDomain);
BillingEvent.Recurring autorenewBillingEvent =
createAutoRenewBillingEventForDomainImport(xjcDomain, historyEntry);
PollMessage.Autorenew autorenewPollMessage =
createAutoRenewPollMessageForDomainImport(xjcDomain, historyEntry);
ofy().save().entities(historyEntry, autorenewBillingEvent, autorenewPollMessage);
return XjcToDomainResourceConverter.convertDomain(
xjcDomain, autorenewBillingEvent, autorenewPollMessage);
}
() -> {
HistoryEntry historyEntry = createHistoryEntryForDomainImport(xjcDomain);
BillingEvent.Recurring autorenewBillingEvent =
createAutoRenewBillingEventForDomainImport(xjcDomain, historyEntry);
PollMessage.Autorenew autorenewPollMessage =
createAutoRenewPollMessageForDomainImport(xjcDomain, historyEntry);
ofy().save().entities(historyEntry, autorenewBillingEvent, autorenewPollMessage);
return XjcToDomainResourceConverter.convertDomain(
xjcDomain, autorenewBillingEvent, autorenewPollMessage);
});
}
@@ -24,7 +24,6 @@ import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.io.ByteSource;
import com.google.common.net.InetAddresses;
import com.googlecode.objectify.Work;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.host.HostResource;
import google.registry.model.reporting.HistoryEntry;
@@ -135,12 +134,7 @@ public class XjcToHostResourceConverterTest extends ShardableTestCase {
}

private static HostResource convertHostInTransaction(final XjcRdeHost xjcHost) {
return ofy().transact(new Work<HostResource>() {
@Override
public HostResource run() {
return XjcToHostResourceConverter.convert(xjcHost);
}
});
return ofy().transact(() -> XjcToHostResourceConverter.convert(xjcHost));
}

private XjcRdeHost loadHostFromRdeXml() throws Exception {
@@ -21,7 +21,6 @@ import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Multimaps.filterKeys;
import static com.google.common.collect.Sets.difference;
import static com.google.common.truth.Truth.assertThat;
import static google.registry.model.EntityClasses.CLASS_TO_KIND_FUNCTION;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.testing.DatastoreHelper.createTld;
import static google.registry.testing.DatastoreHelper.persistActiveContact;
@@ -78,7 +77,7 @@ public class KillAllEppResourcesActionTest extends MapreduceTestCase<KillAllEppR
PollMessage.class,
BillingEvent.OneTime.class,
BillingEvent.Recurring.class)
.map(CLASS_TO_KIND_FUNCTION)
.map(Key::getKind)
.collect(toImmutableSet());

private void runMapreduce() throws Exception {
@@ -291,7 +291,7 @@ public class CidrAddressBlockTest extends TestCase {
Iterator<InetAddress> i = b2.iterator();
i.next();
i.next();
assertThrows(NoSuchElementException.class, () -> i.next());
assertThrows(NoSuchElementException.class, i::next);
}

public void testSerializability() {
@@ -41,8 +41,6 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

/** Unit tests for {@link UrlFetchUtils}. */
@RunWith(JUnit4.class)
@@ -62,12 +60,13 @@ public class UrlFetchUtilsTest {
public void setupRandomZeroes() throws Exception {
Random random = mock(Random.class);
inject.setStaticField(UrlFetchUtils.class, "random", random);
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock info) throws Throwable {
Arrays.fill((byte[]) info.getArguments()[0], (byte) 0);
return null;
}}).when(random).nextBytes(any(byte[].class));
doAnswer(
info -> {
Arrays.fill((byte[]) info.getArguments()[0], (byte) 0);
return null;
})
.when(random)
.nextBytes(any(byte[].class));
}

@Test