mv com/google/domain/registry google/registry

This change renames directories in preparation for the great package
rename. The repository is now in a broken state because the code
itself hasn't been updated. However, this should ensure that git
correctly preserves history for each file.
Justine Tunney 2016-05-13 18:55:08 -04:00
parent a41677aea1
commit 5012893c1d
2396 changed files with 0 additions and 0 deletions


@@ -0,0 +1,31 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "backup",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//java/com/google/common/primitives",
"//java/com/google/common/util/concurrent",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/cron",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/appengine_gcs_client",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/json_simple",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)


@@ -0,0 +1,92 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.ThreadManager.currentRequestThreadFactory;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.RestoreCommitLogsAction.FROM_TIME_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.cron.CommitLogFanoutAction;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import org.joda.time.DateTime;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
import javax.servlet.http.HttpServletRequest;
/**
* Dagger module for the backup package.
*
* @see "com.google.domain.registry.module.backend.BackendComponent"
*/
@Module
public final class BackupModule {
/** Dagger qualifier for backups. */
@Qualifier
@Documented
public static @interface Backups {}
/** Number of threads in the threaded executor. */
private static final int NUM_THREADS = 10;
@Provides
@Parameter("bucket")
static int provideBucket(HttpServletRequest req) {
String param = extractRequiredParameter(req, CommitLogFanoutAction.BUCKET_PARAM);
Integer bucket = Ints.tryParse(param);
if (bucket == null) {
throw new BadRequestException("Bad bucket id");
}
return bucket;
}
@Provides
@Parameter(LOWER_CHECKPOINT_TIME_PARAM)
static DateTime provideLowerCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, LOWER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(UPPER_CHECKPOINT_TIME_PARAM)
static DateTime provideUpperCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, UPPER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(FROM_TIME_PARAM)
static DateTime provideFromTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, FROM_TIME_PARAM);
}
@Provides
@Backups
static ListeningExecutorService provideListeningExecutorService() {
return listeningDecorator(newFixedThreadPool(NUM_THREADS, currentRequestThreadFactory()));
}
}
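
For orientation, here is a minimal hypothetical consumer of these bindings (not part of this commit); the class name MyBackupTask is invented, and it assumes a Dagger component that installs BackupModule:

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.backup.BackupModule.Backups;
import javax.inject.Inject;

/** Hypothetical consumer showing how the module's bindings are injected. */
final class MyBackupTask implements Runnable {
  // Satisfied by provideListeningExecutorService() above, via the @Backups qualifier.
  @Inject @Backups ListeningExecutorService executor;
  @Inject MyBackupTask() {}

  @Override
  public void run() {
    executor.submit(new Runnable() {
      @Override
      public void run() {
        // Background backup work would go here.
      }
    });
  }
}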


@@ -0,0 +1,75 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.model.ImmutableObject;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
/** Utilities for working with backups. */
public class BackupUtils {
/** Keys for user metadata fields on commit log files in GCS. */
public static final class GcsMetadataKeys {
private GcsMetadataKeys() {}
public static final String NUM_TRANSACTIONS = "num_transactions";
public static final String LOWER_BOUND_CHECKPOINT = "lower_bound_checkpoint";
public static final String UPPER_BOUND_CHECKPOINT = "upper_bound_checkpoint";
}
/**
* Converts the given {@link ImmutableObject} to a raw datastore entity and writes it to an
* {@link OutputStream} in delimited protocol buffer format.
*/
static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
EntityTranslator.convertToPb(ofy().save().toEntity(entity)).writeDelimitedTo(stream);
}
/**
* Returns an iterator of {@link ImmutableObject} instances deserialized from the given stream.
*
* <p>This parses out delimited protocol buffers for raw datastore entities and then Ofy-loads
* those as {@link ImmutableObject}.
*
* <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
*/
public static Iterator<ImmutableObject> createDeserializingIterator(final InputStream input) {
return new AbstractIterator<ImmutableObject>() {
@Override
protected ImmutableObject computeNext() {
EntityProto proto = new EntityProto();
if (proto.parseDelimitedFrom(input)) { // False means end of stream; other errors throw.
return ofy().load().<ImmutableObject>fromEntity(EntityTranslator.createFromPb(proto));
}
return endOfData();
}};
}
public static ImmutableList<ImmutableObject> deserializeEntities(byte[] bytes) {
return ImmutableList.copyOf(createDeserializingIterator(new ByteArrayInputStream(bytes)));
}
}
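
As a usage sketch (not part of this commit), the two helpers above compose into a round trip. This assumes an initialized Objectify context, a registered entity instance, and same-package access, since serializeEntity is package-private:

import com.google.common.collect.ImmutableList;
import com.google.domain.registry.model.ImmutableObject;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

/** Hypothetical sketch: serialize an entity and read it straight back. */
final class BackupUtilsRoundTripSketch {
  static ImmutableObject roundTrip(ImmutableObject entity) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Entity -> length-delimited EntityProto bytes on the stream.
    BackupUtils.serializeEntity(entity, out);
    // Bytes -> Ofy-loaded ImmutableObject instances (one per delimited proto).
    ImmutableList<ImmutableObject> read = BackupUtils.deserializeEntities(out.toByteArray());
    return read.get(0);
  }
}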


@@ -0,0 +1,86 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import com.googlecode.objectify.VoidWork;
import org.joda.time.DateTime;
import javax.inject.Inject;
/**
* Action that saves commit log checkpoints to datastore and kicks off a diff export task.
*
* <p>We separate computing and saving the checkpoint from exporting it because the export to GCS
* is retryable but should not require the computation of a new checkpoint. Saving the checkpoint
* and enqueuing the export task are done transactionally, so any checkpoint that is saved will be
* exported to GCS very soon.
*
* <p>This action's supported method is GET rather than POST because it gets invoked via cron.
*/
@Action(
path = "/_dr/cron/commitLogCheckpoint",
method = Action.Method.GET,
automaticallyPrintOk = true)
public final class CommitLogCheckpointAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
private static final String QUEUE_NAME = "export-commits";
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy strategy;
@Inject TaskEnqueuer taskEnqueuer;
@Inject CommitLogCheckpointAction() {}
@Override
public void run() {
final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
logger.info("Generated candidate checkpoint for time " + checkpoint.getCheckpointTime());
ofy().transact(new VoidWork() {
@Override
public void vrun() {
DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
logger.info("Newer checkpoint already written at time: " + lastWrittenTime);
return;
}
ofy().saveWithoutBackup().entities(
checkpoint,
CommitLogCheckpointRoot.create(checkpoint.getCheckpointTime()));
// Enqueue a diff task between previous and current checkpoints.
taskEnqueuer.enqueue(
getQueue(QUEUE_NAME),
withUrl(ExportCommitLogDiffAction.PATH)
.param(LOWER_CHECKPOINT_TIME_PARAM, lastWrittenTime.toString())
.param(UPPER_CHECKPOINT_TIME_PARAM, checkpoint.getCheckpointTime().toString()));
}});
}
}


@@ -0,0 +1,180 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.transformValues;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.util.DateTimeUtils.END_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.earliestOf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.util.Clock;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import org.joda.time.DateTime;
import java.util.List;
import java.util.Map.Entry;
import javax.inject.Inject;
/**
* Implementation of the procedure for determining a point-in-time consistent commit log checkpoint.
*
* <p>This algorithm examines the recently written commit log data and uses a dual-read approach
* to determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
* "consistent" we mean, generally speaking, that if the datastore were restored by replaying all
* the commit logs up to the checkpoint times of the buckets, the result would be transactionally
* correct; there must be no "holes" where restored state depends on non-restored state.
*
* <p>The consistency guarantee really has two parts, only one of which is provided by this
* algorithm. The procedure below guarantees only that if the resulting checkpoint includes any
* given commit log, it will also include all the commit logs that were both 1) actually written
* before that commit log "in real life", and 2) have an earlier timestamp than that commit log.
* (These criteria do not necessarily imply each other, due to the lack of a global shared clock.)
* The rest of the guarantee comes from our Ofy customizations, which ensure that any transaction
* that depends on state from a previous transaction does indeed have a later timestamp.
*
* <h2>Procedure description</h2>
* <pre>
* {@code
* ComputeCheckpoint() -> returns a set consisting of a timestamp c(b_i) for every bucket b_i
*
* 1) read off the latest commit timestamp t(b_i) for every bucket b_i
* 2) iterate over the buckets b_i a second time, and
* a) do a consistent query for the next commit timestamp t'(b_i) where t'(b_i) > t(b_i)
* b) if present, add this timestamp t'(b_i) to a set S
* 3) compute a threshold time T* representing a time before all commits in S, as follows:
a) if S is empty, let T* = +∞ (or the "end of time")
* b) else, let T* = T - Δ, for T = min(S) and some small Δ > 0
* 4) return the set given by: min(t(b_i), T*) for all b_i
* }
* </pre>
*
* <h2>Correctness proof of algorithm</h2>
*
* <p>{@literal
* As described above, the algorithm is correct as long as it can ensure the following: given a
* commit log X written at time t(X) to bucket b_x, and another commit log Y that was written "in
* real life" before X and for which t(Y) < t(X), then if X is included in the checkpoint, so is Y;
* that is, t(X) <= c(b_x) implies t(Y) <= c(b_y).
* }
*
* <p>{@literal
* To prove this, first note that we always have c(b_i) <= t(b_i) for every b_i, i.e. every commit
* log included in the checkpoint must have been seen in the first pass. Hence if X was included,
* then X must have been written by the time we started the second pass. But since Y was written
* "in real life" prior to X, we must have seen Y by the second pass too.
* }
*
* <p>{@literal
* Now assume towards a contradiction that X is indeed included but Y is not, i.e. that we have
* t(X) <= c(b_x) but t(Y) > c(b_y). If Y was seen in the first pass, i.e. t(Y) <= t(b_y), then by
* our assumption c(b_y) < t(Y) <= t(b_y), and therefore c(b_y) != t(b_y). By the definition of
* c(b_y) it must then equal T*, so we have T* < t(Y). However, this is a contradiction since
* t(Y) < t(X) and t(X) <= c(b_x) <= T*. If instead Y was seen in the second pass but not the
* first, t'(b_y) exists and we must have t'(b_y) <= t(Y), but then since T* < T <= t'(b_y) by
* definition, we again reach the contradiction T* < t(Y).
* }
*/
class CommitLogCheckpointStrategy {
@Inject Ofy ofy;
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy() {}
/** Compute and return a new CommitLogCheckpoint for the current point in time. */
public CommitLogCheckpoint computeCheckpoint() {
DateTime checkpointTime = clock.nowUtc();
ImmutableMap<Integer, DateTime> firstPassTimes = readBucketTimestamps();
DateTime threshold = readNewCommitLogsAndFindThreshold(firstPassTimes);
return CommitLogCheckpoint.create(
checkpointTime,
computeBucketCheckpointTimes(firstPassTimes, threshold));
}
/**
* Returns a map from all bucket IDs to their current last written time values, fetched without
* a transaction so with no guarantee of consistency across buckets.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> readBucketTimestamps() {
// Use a fresh session cache so that we get the latest data from datastore.
return ofy.doWithFreshSessionCache(new Work<ImmutableMap<Integer, DateTime>>() {
@Override
public ImmutableMap<Integer, DateTime> run() {
ImmutableMap.Builder<Integer, DateTime> results = new ImmutableMap.Builder<>();
for (CommitLogBucket bucket : CommitLogBucket.loadAllBuckets()) {
results.put(bucket.getBucketNum(), bucket.getLastWrittenTime());
}
return results.build();
}});
}
/**
* Returns a threshold value defined as the latest timestamp that is before all new commit logs,
* where "new" means having a commit time after the per-bucket timestamp in the given map.
* When no such commit logs exist, the threshold value is set to END_OF_TIME.
*/
@VisibleForTesting
DateTime readNewCommitLogsAndFindThreshold(ImmutableMap<Integer, DateTime> bucketTimes) {
DateTime timeBeforeAllNewCommits = END_OF_TIME;
for (Entry<Integer, DateTime> entry : bucketTimes.entrySet()) {
Key<CommitLogBucket> bucketKey = getBucketKey(entry.getKey());
DateTime bucketTime = entry.getValue();
// Add 1 to handle START_OF_TIME, since 0 isn't a valid id; the filter then uses >= instead of >.
Key<CommitLogManifest> keyForFilter =
Key.create(CommitLogManifest.create(bucketKey, bucketTime.plusMillis(1), null));
List<Key<CommitLogManifest>> manifestKeys =
ofy.load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", keyForFilter)
.limit(1)
.keys()
.list();
if (!manifestKeys.isEmpty()) {
timeBeforeAllNewCommits = earliestOf(
timeBeforeAllNewCommits,
CommitLogManifest.extractCommitTime(getOnlyElement(manifestKeys)).minusMillis(1));
}
}
return timeBeforeAllNewCommits;
}
/**
* Returns the bucket checkpoint times produced by clamping the given set of bucket timestamps to
* at most the given threshold value.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> computeBucketCheckpointTimes(
ImmutableMap<Integer, DateTime> firstPassTimes,
final DateTime threshold) {
return ImmutableMap.copyOf(transformValues(firstPassTimes, new Function<DateTime, DateTime>() {
@Override
public DateTime apply(DateTime firstPassTime) {
return earliestOf(firstPassTime, threshold);
}}));
}
}
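
To make the procedure above concrete, here is a hypothetical worked example (not part of this commit) using invented millisecond offsets: the first pass reads t(b1)=100, t(b2)=200, t(b3)=300; the second pass finds one new commit t'(b2)=205, so S = {205}, T = 205, and T* = 204 with Δ = 1ms. A runnable sketch of the final clamping step:

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

/** Hypothetical walkthrough of steps 1-4 from the class Javadoc above. */
final class CheckpointClampSketch {
  public static void main(String[] args) {
    DateTime t0 = new DateTime(0, DateTimeZone.UTC);
    // Step 1: first-pass last-written times t(b_i).
    ImmutableMap<Integer, DateTime> firstPass = ImmutableMap.of(
        1, t0.plusMillis(100), 2, t0.plusMillis(200), 3, t0.plusMillis(300));
    // Steps 2-3: S = {205} from the second pass, so T* = min(S) - 1ms = 204.
    DateTime threshold = t0.plusMillis(204);
    // Step 4: clamp each bucket time to at most T*, as in computeBucketCheckpointTimes().
    ImmutableMap.Builder<Integer, DateTime> checkpoint = new ImmutableMap.Builder<>();
    for (Map.Entry<Integer, DateTime> entry : firstPass.entrySet()) {
      DateTime t = entry.getValue();
      checkpoint.put(entry.getKey(), t.isBefore(threshold) ? t : threshold);
    }
    // c(b1)=100, c(b2)=200, c(b3)=204: bucket 3 is clamped below the unseen
    // commit at 205, so replaying to these times can never create a "hole".
    System.out.println(checkpoint.build());
  }
}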


@@ -0,0 +1,160 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.request.Action.Method.POST;
import com.google.common.base.Function;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import com.googlecode.objectify.cmd.Loader;
import com.googlecode.objectify.cmd.Query;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.util.List;
import javax.inject.Inject;
/**
* Task that garbage collects old {@link CommitLogManifest} entities.
*
* <p>Once commit logs have been written to GCS, we don't really need them in datastore anymore,
* except to reconstruct point-in-time snapshots of the database. But that functionality is not
* useful after a certain amount of time, e.g. thirty days. So this task runs periodically to delete
* the old data.
*
* <p>This task should be invoked in a fanout style for each {@link CommitLogBucket} ID. It then
* queries {@code CommitLogManifest} entities older than the threshold, using an ancestor query
operating under the assumption that the ID is the transaction timestamp in milliseconds since
the UNIX epoch. It then deletes them inside a transaction, along with their
* associated {@link CommitLogMutation} entities.
*
<p>If additional data is left over, we log a warning at the INFO level, because it's not
* actionable. If anything, it just shows that the system was under high load thirty days ago, and
* therefore serves little use as an early warning to increase the number of buckets.
*
* <p>Before running, this task will perform an eventually consistent count query outside of a
* transaction to see how much data actually exists to delete. If it's less than a tenth of
* {@link #maxDeletes}, then we don't bother running the task. This is to minimize contention on the
* bucket and avoid wasting resources.
*
* <h3>Dimensioning</h3>
*
* <p>This entire operation operates on a single entity group, within a single transaction. Since
there's a 10 MB upper bound on transaction size and a four-minute time limit, we can only delete
* so many commit logs at once. So given the above constraints, five hundred would make a safe
* default value for {@code maxDeletes}. See {@linkplain
* com.google.domain.registry.config.ConfigModule#provideCommitLogMaxDeletes() commitLogMaxDeletes}
* for further documentation on this matter.
*
* <p>Finally, we need to pick an appropriate cron interval time for this task. Since a bucket
* represents a single datastore entity group, it's only guaranteed to have one transaction per
* second. So we just need to divide {@code maxDeletes} by sixty to get an appropriate minute
* interval. Assuming {@code maxDeletes} is five hundred, this rounds up to ten minutes, which we'll
* double, since this task can always catch up in off-peak hours.
*
* <p>There's little harm in keeping the data around a little longer, since this task is engaged in
a zero-sum resource struggle with the EPP transactions. Each transaction we perform here is one
* less transaction that's available to EPP. Furthermore, a well-administered system should have
* enough buckets that we'll never brush up against the 1/s entity group transaction SLA.
*/
@Action(path = "/_dr/task/deleteOldCommitLogs", method = POST, automaticallyPrintOk = true)
public final class DeleteOldCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject Clock clock;
@Inject Ofy ofy;
@Inject @Parameter("bucket") int bucketNum;
@Inject @Config("commitLogDatastoreRetention") Duration maxAge;
@Inject @Config("commitLogMaxDeletes") int maxDeletes;
@Inject DeleteOldCommitLogsAction() {}
@Override
public void run() {
if (!doesEnoughDataExistThatThisTaskIsWorthRunning()) {
return;
}
Integer deleted = ofy.transact(new Work<Integer>() {
@Override
public Integer run() {
// Load at most maxDeletes manifest keys of commit logs older than the deletion threshold.
List<Key<CommitLogManifest>> manifestKeys =
queryManifests(ofy.load())
.limit(maxDeletes)
.keys()
.list();
// transform() is lazy so copyOf() ensures all the subqueries happen in parallel, because
// the queries are launched by iterable(), put into a list, and then the list of iterables
// is consumed and concatenated.
ofy.deleteWithoutBackup().keys(concat(copyOf(transform(manifestKeys,
new Function<Key<CommitLogManifest>, Iterable<Key<CommitLogMutation>>>() {
@Override
public Iterable<Key<CommitLogMutation>> apply(Key<CommitLogManifest> manifestKey) {
return ofy.load()
.type(CommitLogMutation.class)
.ancestor(manifestKey)
.keys()
.iterable(); // launches the query asynchronously
}}))));
ofy.deleteWithoutBackup().keys(manifestKeys);
return manifestKeys.size();
}});
if (deleted == maxDeletes) {
logger.infofmt("Additional old commit logs might exist in bucket %d", bucketNum);
}
}
/** Returns the deletion threshold; commit logs with commit times before this point are deleted. */
private DateTime getDeletionThreshold() {
return clock.nowUtc().minus(maxAge);
}
private boolean doesEnoughDataExistThatThisTaskIsWorthRunning() {
int tenth = Math.max(1, maxDeletes / 10);
int count = queryManifests(ofy.loadEventuallyConsistent())
.limit(tenth)
.count();
if (0 < count && count < tenth) {
logger.infofmt("Not enough old commit logs to bother running: %d < %d", count, tenth);
}
return count >= tenth;
}
private Query<CommitLogManifest> queryManifests(Loader loader) {
long thresholdMillis = getDeletionThreshold().getMillis();
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return loader
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey("<", Key.create(bucketKey, CommitLogManifest.class, thresholdMillis));
}
}
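
Spelling out the cron-interval arithmetic from the Javadoc above (a sketch assuming the suggested default of five hundred for maxDeletes, not registry configuration):

/** Hypothetical back-of-the-envelope for the "Dimensioning" notes above. */
final class CronIntervalSketch {
  public static void main(String[] args) {
    int maxDeletes = 500;   // assumed default batch size
    int txPerMinute = 60;   // one entity-group transaction per second
    double minutes = (double) maxDeletes / txPerMinute;  // 8.33...
    // The Javadoc rounds this up to 10 minutes and then doubles it to 20,
    // since the task can always catch up during off-peak hours.
    System.out.printf("raw=%.2f min, rounded=10 min, doubled=20 min%n", minutes);
  }
}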


@@ -0,0 +1,219 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Lists.partition;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.NUM_TRANSACTIONS;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.UPPER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.serializeEntity;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isAtOrAfter;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import static java.nio.channels.Channels.newOutputStream;
import static java.util.Arrays.asList;
import com.google.appengine.tools.cloudstorage.GcsFileOptions;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import javax.inject.Inject;
/** Action that exports the diff between two commit log checkpoints to GCS. */
@Action(
path = ExportCommitLogDiffAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public final class ExportCommitLogDiffAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
static final String PATH = "/_dr/task/exportCommitLogDiff";
static final String UPPER_CHECKPOINT_TIME_PARAM = "upperCheckpointTime";
static final String LOWER_CHECKPOINT_TIME_PARAM = "lowerCheckpointTime";
public static final String DIFF_FILE_PREFIX = "commit_diff_until_";
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Config("commitLogDiffExportBatchSize") int batchSize;
@Inject @Parameter(LOWER_CHECKPOINT_TIME_PARAM) DateTime lowerCheckpointTime;
@Inject @Parameter(UPPER_CHECKPOINT_TIME_PARAM) DateTime upperCheckpointTime;
@Inject ExportCommitLogDiffAction() {}
@Override
public void run() {
checkArgument(isAtOrAfter(lowerCheckpointTime, START_OF_TIME));
checkArgument(lowerCheckpointTime.isBefore(upperCheckpointTime));
// Load the boundary checkpoints - lower is exclusive and may not exist (on the first export,
// when lowerCheckpointTime is START_OF_TIME), whereas the upper is inclusive and must exist.
CommitLogCheckpoint lowerCheckpoint = lowerCheckpointTime.isAfter(START_OF_TIME)
? verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(lowerCheckpointTime)).now())
: null;
CommitLogCheckpoint upperCheckpoint =
verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(upperCheckpointTime)).now());
// Load the keys of all the manifests to include in this diff.
List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
logger.infofmt("Found %d manifests to export", sortedKeys.size());
// Open an output channel to GCS, wrapped in a stream for convenience.
try (OutputStream gcsStream = newOutputStream(gcsService.createOrReplace(
new GcsFilename(gcsBucket, DIFF_FILE_PREFIX + upperCheckpointTime),
new GcsFileOptions.Builder()
.addUserMetadata(LOWER_BOUND_CHECKPOINT, lowerCheckpointTime.toString())
.addUserMetadata(UPPER_BOUND_CHECKPOINT, upperCheckpointTime.toString())
.addUserMetadata(NUM_TRANSACTIONS, Integer.toString(sortedKeys.size()))
.build()))) {
// Export the upper checkpoint itself.
serializeEntity(upperCheckpoint, gcsStream);
// If there are no manifests to export, stop early, now that we've written out the file with
// the checkpoint itself (which is needed for restores, even if it's empty).
if (sortedKeys.isEmpty()) {
return;
}
// Export to GCS in chunks, one per fixed batch of commit logs. While processing one batch,
// asynchronously load the entities for the next one.
List<List<Key<CommitLogManifest>>> keyChunks = partition(sortedKeys, batchSize);
// Objectify's map return type is asynchronous. Calling .values() will block until it loads.
Map<?, CommitLogManifest> nextChunkToExport = ofy().load().keys(keyChunks.get(0));
for (int i = 0; i < keyChunks.size(); i++) {
// Force the async load to finish.
Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
logger.infofmt("Loaded %d manifests", chunkValues.size());
// Since there is no hard bound on how much data this might be, take care not to let the
// Objectify session cache fill up and potentially run out of memory. This is the only safe
// point to do this since at this point there is no async load in progress.
ofy().clearSessionCache();
// Kick off the next async load, which can happen in parallel to the current GCS export.
if (i + 1 < keyChunks.size()) {
nextChunkToExport = ofy().load().keys(keyChunks.get(i + 1));
}
exportChunk(gcsStream, chunkValues);
logger.infofmt("Exported %d manifests", chunkValues.size());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
logger.infofmt("Exported %d manifests in total", sortedKeys.size());
}
/**
* Loads all the diff keys, sorted in a transaction-consistent chronological order.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
*/
private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
@Nullable final CommitLogCheckpoint lowerCheckpoint,
final CommitLogCheckpoint upperCheckpoint) {
// Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
// transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
// CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
// a deterministic order.
return FluentIterable.from(upperCheckpoint.getBucketTimestamps().keySet())
.transformAndConcat(new Function<Integer, Iterable<Key<CommitLogManifest>>>() {
@Override
public Iterable<Key<CommitLogManifest>> apply(Integer bucketNum) {
return loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum);
}})
.toSortedList(new Comparator<Key<CommitLogManifest>>() {
@Override
public int compare(Key<CommitLogManifest> a, Key<CommitLogManifest> b) {
// Compare keys by timestamp (which is encoded in the id as millis), then by bucket id.
return ComparisonChain.start()
.compare(a.getId(), b.getId())
.compare(a.getParent().getId(), b.getParent().getId())
.result();
}});
}
/**
* Loads the diff keys for one bucket.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
* @param bucketNum the bucket to load diff keys from
*/
private Iterable<Key<CommitLogManifest>> loadDiffKeysFromBucket(
@Nullable CommitLogCheckpoint lowerCheckpoint,
CommitLogCheckpoint upperCheckpoint,
int bucketNum) {
// If no lower checkpoint exists, use START_OF_TIME as the effective exclusive lower bound.
DateTime lowerCheckpointBucketTime = lowerCheckpoint == null
? START_OF_TIME
: lowerCheckpoint.getBucketTimestamps().get(bucketNum);
// Since START_OF_TIME=0 is not a valid id in a key, add 1 to both bounds. Then instead of
// loading lowerBound < x <= upperBound, we can load lowerBound <= x < upperBound.
DateTime lowerBound = lowerCheckpointBucketTime.plusMillis(1);
DateTime upperBound = upperCheckpoint.getBucketTimestamps().get(bucketNum).plusMillis(1);
// If the lower and upper bounds are equal, there can't be any results, so skip the query.
if (lowerBound.equals(upperBound)) {
return ImmutableSet.of();
}
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return ofy().load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", CommitLogManifest.createKey(bucketKey, lowerBound))
.filterKey("<", CommitLogManifest.createKey(bucketKey, upperBound))
.keys();
}
/** Writes a chunk's worth of manifests and associated mutations to GCS. */
private void exportChunk(OutputStream gcsStream, Collection<CommitLogManifest> chunk)
throws IOException {
// Kickoff async loads for all the manifests in the chunk.
ImmutableList.Builder<Iterable<? extends ImmutableObject>> entities =
new ImmutableList.Builder<>();
for (CommitLogManifest manifest : chunk) {
entities.add(asList(manifest));
entities.add(ofy().load().type(CommitLogMutation.class).ancestor(manifest));
}
for (ImmutableObject entity : concat(entities.build())) {
serializeEntity(entity, gcsStream);
}
}
}
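
The load-ahead pattern in run() above, distilled into a standalone sketch; the Loader and Sink interfaces here are invented for illustration, not registry APIs. While chunk i streams to the sink, chunk i+1 is already loading:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.Callable;

/** Hypothetical distillation of the prefetch loop in run() above. */
final class PipelinedExportSketch<T> {
  interface Loader<V> { V load(int chunkIndex); }
  interface Sink<V> { void write(V chunk); }

  void exportAll(int numChunks, Loader<T> loader, Sink<T> sink,
      ListeningExecutorService executor) {
    if (numChunks == 0) {
      return;
    }
    ListenableFuture<T> next = submit(executor, loader, 0);
    for (int i = 0; i < numChunks; i++) {
      T current = Futures.getUnchecked(next);  // block until chunk i is loaded
      if (i + 1 < numChunks) {
        next = submit(executor, loader, i + 1);  // overlaps with the write below
      }
      sink.write(current);  // the slow I/O step, e.g. streaming to GCS
    }
  }

  private ListenableFuture<T> submit(
      ListeningExecutorService executor, final Loader<T> loader, final int i) {
    return executor.submit(new Callable<T>() {
      @Override
      public T call() {
        return loader.load(i);
      }
    });
  }
}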


@@ -0,0 +1,128 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkState;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.DIFF_FILE_PREFIX;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.DateTimeUtils.latestOf;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.appengine.tools.cloudstorage.ListItem;
import com.google.appengine.tools.cloudstorage.ListOptions;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.backup.BackupModule.Backups;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.DateTime;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Utility class to list commit log diff files stored on GCS. */
class GcsDiffFileLister {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Backups ListeningExecutorService executor;
@Inject GcsDiffFileLister() {}
List<GcsFileMetadata> listDiffFiles(DateTime fromTime) {
logger.info("Requested restore from time: " + fromTime);
// List all of the diff files on GCS and build a map from each file's upper checkpoint time
// (extracted from the filename) to its asynchronously-loaded metadata, keeping only files with
// an upper checkpoint time > fromTime.
Map<DateTime, ListenableFuture<GcsFileMetadata>> upperBoundTimesToMetadata = new HashMap<>();
Iterator<ListItem> listItems;
try {
// TODO(b/23554360): Use a smarter prefixing strategy to speed this up.
listItems = gcsService.list(
gcsBucket,
new ListOptions.Builder().setPrefix(DIFF_FILE_PREFIX).build());
} catch (IOException e) {
throw new RuntimeException(e);
}
DateTime lastUpperBoundTime = START_OF_TIME;
while (listItems.hasNext()) {
final String filename = listItems.next().getName();
DateTime upperBoundTime = DateTime.parse(filename.substring(DIFF_FILE_PREFIX.length()));
if (isBeforeOrAt(fromTime, upperBoundTime)) {
upperBoundTimesToMetadata.put(upperBoundTime, executor.submit(
new Callable<GcsFileMetadata>() {
@Override
public GcsFileMetadata call() throws Exception {
return getMetadata(filename);
}}));
}
lastUpperBoundTime = latestOf(upperBoundTime, lastUpperBoundTime);
}
if (upperBoundTimesToMetadata.isEmpty()) {
logger.info("No files found");
return ImmutableList.of();
}
// GCS file listing is eventually consistent, so it's possible that we are missing a file. The
// metadata of a file is sufficient to identify the preceding file, so if we start from the
// last file and work backwards we can verify that we have no holes in our chain (although we
// may be missing files at the end).
ImmutableList.Builder<GcsFileMetadata> filesBuilder = new ImmutableList.Builder<>();
logger.info("Restoring until: " + lastUpperBoundTime);
DateTime checkpointTime = lastUpperBoundTime;
while (checkpointTime.isAfter(fromTime)) {
GcsFileMetadata metadata;
if (upperBoundTimesToMetadata.containsKey(checkpointTime)) {
metadata = Futures.getUnchecked(upperBoundTimesToMetadata.get(checkpointTime));
} else {
String filename = DIFF_FILE_PREFIX + checkpointTime;
logger.info("Patching GCS list; discovered file " + filename);
metadata = getMetadata(filename);
checkState(metadata != null, "Could not read metadata for file %s", filename);
}
filesBuilder.add(metadata);
checkpointTime = getLowerBoundTime(metadata);
}
ImmutableList<GcsFileMetadata> files = filesBuilder.build().reverse();
logger.info("Actual restore from time: " + getLowerBoundTime(files.get(0)));
logger.infofmt("Found %d files to restore", files.size());
return files;
}
private DateTime getLowerBoundTime(GcsFileMetadata metadata) {
return DateTime.parse(metadata.getOptions().getUserMetadata().get(LOWER_BOUND_CHECKPOINT));
}
private GcsFileMetadata getMetadata(String filename) {
try {
return gcsService.getMetadata(new GcsFilename(gcsBucket, filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}


@@ -0,0 +1,207 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Iterators.peekingIterator;
import static com.google.domain.registry.backup.BackupUtils.createDeserializingIterator;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static java.util.Arrays.asList;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.Retrier;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Result;
import com.googlecode.objectify.util.ResultNow;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Restore Registry 2 commit logs from GCS to datastore. */
@Action(
path = RestoreCommitLogsAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public class RestoreCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final int BLOCK_SIZE = 1024 * 1024; // Buffer 1 MB at a time, for no particular reason.
static final String PATH = "/_dr/task/restoreCommitLogs";
static final String DRY_RUN_PARAM = "dryRun";
static final String FROM_TIME_PARAM = "fromTime";
@Inject GcsService gcsService;
@Inject @Parameter(DRY_RUN_PARAM) boolean dryRun;
@Inject @Parameter(FROM_TIME_PARAM) DateTime fromTime;
@Inject DatastoreService datastoreService;
@Inject GcsDiffFileLister diffLister;
@Inject Retrier retrier;
@Inject RestoreCommitLogsAction() {}
@Override
public void run() {
checkArgument( // safety
RegistryEnvironment.get() == RegistryEnvironment.ALPHA
|| RegistryEnvironment.get() == RegistryEnvironment.UNITTEST,
"DO NOT RUN ANYWHERE ELSE EXCEPT ALPHA OR TESTS.");
if (dryRun) {
logger.info("Running in dryRun mode");
}
List<GcsFileMetadata> diffFiles = diffLister.listDiffFiles(fromTime);
if (diffFiles.isEmpty()) {
logger.info("Nothing to restore");
return;
}
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
CommitLogCheckpoint lastCheckpoint = null;
for (GcsFileMetadata metadata : diffFiles) {
logger.info("Restoring: " + metadata.getFilename().getObjectName());
try (InputStream input = Channels.newInputStream(
gcsService.openPrefetchingReadChannel(metadata.getFilename(), 0, BLOCK_SIZE))) {
PeekingIterator<ImmutableObject> commitLogs =
peekingIterator(createDeserializingIterator(input));
lastCheckpoint = (CommitLogCheckpoint) commitLogs.next();
saveOfy(asList(lastCheckpoint)); // Save the checkpoint itself.
while (commitLogs.hasNext()) {
CommitLogManifest manifest = restoreOneTransaction(commitLogs);
bucketTimestamps.put(manifest.getBucketId(), manifest.getCommitTime());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
// Restore the CommitLogCheckpointRoot and CommitLogBuckets.
saveOfy(FluentIterable.from(bucketTimestamps.entrySet())
.transform(new Function<Entry<Integer, DateTime>, ImmutableObject>() {
@Override
public ImmutableObject apply(Entry<Integer, DateTime> entry) {
return new CommitLogBucket.Builder()
.setBucketNum(entry.getKey())
.setLastWrittenTime(entry.getValue())
.build();
}})
.append(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())));
}
/**
* Restore the contents of one transaction to datastore.
*
* <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
* object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
* restore by deleting the deletes and recreating the saves from their proto form. We also save
* the commit logs themselves back to datastore, so that the commit log system itself is
* transparently restored alongside the data.
*
* @return the manifest, for use in restoring the {@link CommitLogBucket}.
*/
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
Result<?> deleteResult = deleteAsync(manifest.getDeletions());
List<Entity> entitiesToSave = Lists.newArrayList(ofy().save().toEntity(manifest));
while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
entitiesToSave.add(ofy().save().toEntity(mutation));
entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
}
saveRaw(entitiesToSave);
try {
deleteResult.now();
} catch (Exception e) {
retry(new Runnable() {
@Override
public void run() {
deleteAsync(manifest.getDeletions()).now();
}});
}
return manifest;
}
private void saveRaw(final List<Entity> entitiesToSave) {
if (dryRun) {
logger.info("Would have saved " + entitiesToSave);
return;
}
retry(new Runnable() {
@Override
public void run() {
datastoreService.put(entitiesToSave);
}});
}
private void saveOfy(final Iterable<? extends ImmutableObject> objectsToSave) {
if (dryRun) {
logger.info("Would have saved " + asList(objectsToSave));
return;
}
retry(new Runnable() {
@Override
public void run() {
ofy().saveWithoutBackup().entities(objectsToSave).now();
}});
}
private Result<?> deleteAsync(Set<Key<?>> keysToDelete) {
if (dryRun) {
logger.info("Would have deleted " + keysToDelete);
}
return dryRun || keysToDelete.isEmpty()
? new ResultNow<Void>(null)
: ofy().deleteWithoutBackup().entities(keysToDelete);
}
/** Retrier for saves and deletes, since we can't proceed with any failures. */
private void retry(final Runnable runnable) {
retrier.callWithRetry(
new Callable<Void>() {
@Override
public Void call() throws Exception {
runnable.run();
return null;
}},
RuntimeException.class);
}
}
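
For reference, a sketch of the diff file layout this loop consumes, as inferred from ExportCommitLogDiffAction and restoreOneTransaction (illustrative, not normative documentation):

// Each diff file is a sequence of length-delimited EntityProtos:
//
//   CommitLogCheckpoint          <- the upper checkpoint, always first
//   CommitLogManifest (txn 1)    <- carries the keys to delete
//   CommitLogMutation ...        <- zero or more entities to re-save
//   CommitLogManifest (txn 2)
//   CommitLogMutation ...
//   ...
//
// restoreOneTransaction() consumes exactly one manifest plus its trailing
// mutations per call, using peek() to detect the next manifest boundary.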


@@ -0,0 +1,16 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.backup;