Import code from internal repository to git

Justine Tunney 2016-03-01 17:18:14 -05:00
commit 0ef0c933d2
2490 changed files with 281594 additions and 0 deletions

@@ -0,0 +1,10 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "http",
exports = [
"@google_api_client//jar",
"@google_api_client_appengine//jar",
"@google_http_client_appengine//jar",
],
)

@@ -0,0 +1,10 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "oauth2",
exports = [
"@google_api_client//jar",
"@google_http_client//jar",
"@google_oauth_client//jar",
],
)

@@ -0,0 +1,10 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "oauth2",
exports = [
"@google_api_client//jar",
"@google_api_client_appengine//jar",
"@google_oauth_client_appengine//jar",
],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "json",
exports = ["@google_api_client//jar"],
)

@@ -0,0 +1,9 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "http",
exports = [
"@google_api_client//jar",
"@google_http_client//jar",
],
)

@@ -0,0 +1,9 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "javanet",
exports = [
"@google_api_client//jar",
"@google_http_client//jar",
],
)

@@ -0,0 +1,9 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "json",
exports = [
"@google_api_client//jar",
"@google_http_client//jar",
],
)

@@ -0,0 +1,9 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "jackson2",
exports = [
"@google_api_client//jar",
"@google_http_client_jackson2//jar",
],
)

@@ -0,0 +1,9 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "http",
exports = [
"@google_api_client//jar",
"@google_http_client//jar",
],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "annotations",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "base",
exports = ["@bazel_tools//third_party:guava"],
)

java/com/google/common/cache/BUILD
@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "cache",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "collect",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "escape",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "hash",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "html",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "io",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "math",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "net",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "primitives",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "reflect",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,7 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "testing",
testonly = 1,
exports = ["@bazel_tools//third_party:guava-testlib"],
)

@@ -0,0 +1,6 @@
package(default_visibility = ["//visibility:public"])
java_library(
name = "concurrent",
exports = ["@bazel_tools//third_party:guava"],
)

@@ -0,0 +1,11 @@
package(default_visibility = ["//visibility:public"])
package_group(
name = "registry_project",
packages = [
"//java/com/google/domain/registry/...",
"//javatests/com/google/domain/registry/...",
],
)

@@ -0,0 +1,31 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "backup",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//java/com/google/common/primitives",
"//java/com/google/common/util/concurrent",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/cron",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/appengine_gcs_client",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/json_simple",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

@@ -0,0 +1,92 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.ThreadManager.currentRequestThreadFactory;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.RestoreCommitLogsAction.FROM_TIME_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.cron.CommitLogFanoutAction;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import org.joda.time.DateTime;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
import javax.servlet.http.HttpServletRequest;
/**
* Dagger module for backup package.
*
* @see "com.google.domain.registry.module.backend.BackendComponent"
*/
@Module
public final class BackupModule {
/** Dagger qualifier for backups. */
@Qualifier
@Documented
public static @interface Backups {}
/** Number of threads in the threaded executor. */
private static final int NUM_THREADS = 10;
@Provides
@Parameter("bucket")
static int provideBucket(HttpServletRequest req) {
String param = extractRequiredParameter(req, CommitLogFanoutAction.BUCKET_PARAM);
Integer bucket = Ints.tryParse(param);
if (bucket == null) {
throw new BadRequestException("Bad bucket id");
}
return bucket;
}
@Provides
@Parameter(LOWER_CHECKPOINT_TIME_PARAM)
static DateTime provideLowerCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, LOWER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(UPPER_CHECKPOINT_TIME_PARAM)
static DateTime provideUpperCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, UPPER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(FROM_TIME_PARAM)
static DateTime provideFromTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, FROM_TIME_PARAM);
}
@Provides
@Backups
static ListeningExecutorService provideListeningExecutorService() {
return listeningDecorator(newFixedThreadPool(NUM_THREADS, currentRequestThreadFactory()));
}
}
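
A minimal sketch of how these bindings might be consumed, assuming a Dagger component that installs BackupModule; the ExampleBackupTask class and its body are invented for illustration:

    // Hypothetical consumer; the surrounding component is assumed, not shown.
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.domain.registry.backup.BackupModule.Backups;
    import javax.inject.Inject;

    class ExampleBackupTask implements Runnable {
      // Supplied by provideListeningExecutorService() in BackupModule.
      @Inject @Backups ListeningExecutorService executor;
      @Inject ExampleBackupTask() {}

      @Override
      public void run() {
        // Work submitted here runs on request threads created by the module.
        executor.submit(new Runnable() {
          @Override
          public void run() {
            System.out.println("backup work");
          }});
      }
    }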

@@ -0,0 +1,75 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.model.ImmutableObject;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
/** Utilities for working with backups. */
public class BackupUtils {
/** Keys for user metadata fields on commit log files in GCS. */
public static final class GcsMetadataKeys {
private GcsMetadataKeys() {}
public static final String NUM_TRANSACTIONS = "num_transactions";
public static final String LOWER_BOUND_CHECKPOINT = "lower_bound_checkpoint";
public static final String UPPER_BOUND_CHECKPOINT = "upper_bound_checkpoint";
}
/**
* Converts the given {@link ImmutableObject} to a raw datastore entity and writes it to an
* {@link OutputStream} in delimited protocol buffer format.
*/
static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
EntityTranslator.convertToPb(ofy().save().toEntity(entity)).writeDelimitedTo(stream);
}
/**
* Returns an iterator of {@link ImmutableObject} instances deserialized from the given stream.
*
* <p>This parses out delimited protocol buffers for raw datastore entities and then Ofy-loads
* those as {@link ImmutableObject}.
*
* <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
*/
public static Iterator<ImmutableObject> createDeserializingIterator(final InputStream input) {
return new AbstractIterator<ImmutableObject>() {
@Override
protected ImmutableObject computeNext() {
EntityProto proto = new EntityProto();
if (proto.parseDelimitedFrom(input)) { // False means end of stream; other errors throw.
return ofy().load().<ImmutableObject>fromEntity(EntityTranslator.createFromPb(proto));
}
return endOfData();
}};
}
public static ImmutableList<ImmutableObject> deserializeEntities(byte[] bytes) {
return ImmutableList.copyOf(createDeserializingIterator(new ByteArrayInputStream(bytes)));
}
}
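
To illustrate the delimited format these helpers rely on, here is a toy, self-contained sketch of length-prefixed framing (using a fixed-width int prefix rather than the varint that protocol buffers actually use; all names are invented):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.EOFException;
    import java.io.IOException;

    class DelimitedFramingSketch {
      static void writeRecord(DataOutputStream out, byte[] record) throws IOException {
        out.writeInt(record.length);  // fixed-width length prefix (protobuf uses a varint)
        out.write(record);
      }

      static byte[] readRecord(DataInputStream in) throws IOException {
        int length;
        try {
          length = in.readInt();
        } catch (EOFException e) {
          return null;  // clean end of stream, like parseDelimitedFrom() returning false
        }
        byte[] record = new byte[length];
        in.readFully(record);
        return record;
      }

      public static void main(String[] args) throws IOException {
        // Records concatenate on one stream, so many entities fit in one file.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);
        writeRecord(out, "entity-1".getBytes("UTF-8"));
        writeRecord(out, "entity-2".getBytes("UTF-8"));
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        for (byte[] record; (record = readRecord(in)) != null; ) {
          System.out.println(new String(record, "UTF-8"));
        }
      }
    }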

@@ -0,0 +1,86 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import com.googlecode.objectify.VoidWork;
import org.joda.time.DateTime;
import javax.inject.Inject;
/**
* Action that saves commit log checkpoints to datastore and kicks off a diff export task.
*
* <p>We separate computing and saving the checkpoint from exporting it because the export to GCS
* is retryable but should not require the computation of a new checkpoint. Saving the checkpoint
* and enqueuing the export task are done transactionally, so any checkpoint that is saved will be
* exported to GCS very soon.
*
* <p>This action's supported method is GET rather than POST because it gets invoked via cron.
*/
@Action(
path = "/_dr/cron/commitLogCheckpoint",
method = Action.Method.GET,
automaticallyPrintOk = true)
public final class CommitLogCheckpointAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
private static final String QUEUE_NAME = "export-commits";
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy strategy;
@Inject TaskEnqueuer taskEnqueuer;
@Inject CommitLogCheckpointAction() {}
@Override
public void run() {
final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
logger.info("Generated candidate checkpoint for time " + checkpoint.getCheckpointTime());
ofy().transact(new VoidWork() {
@Override
public void vrun() {
DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
logger.info("Newer checkpoint already written at time: " + lastWrittenTime);
return;
}
ofy().saveWithoutBackup().entities(
checkpoint,
CommitLogCheckpointRoot.create(checkpoint.getCheckpointTime()));
// Enqueue a diff task between previous and current checkpoints.
taskEnqueuer.enqueue(
getQueue(QUEUE_NAME),
withUrl(ExportCommitLogDiffAction.PATH)
.param(LOWER_CHECKPOINT_TIME_PARAM, lastWrittenTime.toString())
.param(UPPER_CHECKPOINT_TIME_PARAM, checkpoint.getCheckpointTime().toString()));
}});
}
}
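
The transactional body above reduces to a compare-then-write guard. A toy standalone sketch of that guard, with invented names and plain longs standing in for the datastore entities and times:

    class StaleCheckpointGuardSketch {
      static long lastWrittenTime = 100;  // stand-in for CommitLogCheckpointRoot

      static void maybeWrite(long candidateTime) {
        if (candidateTime <= lastWrittenTime) {
          // A later run already checkpointed; exit without writing or enqueuing.
          System.out.println("skip: newer checkpoint already written at " + lastWrittenTime);
          return;
        }
        lastWrittenTime = candidateTime;  // the save + export enqueue would go here
        System.out.println("wrote checkpoint at " + candidateTime);
      }

      public static void main(String[] args) {
        maybeWrite(90);   // stale candidate, skipped
        maybeWrite(110);  // fresh candidate, written
      }
    }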

@@ -0,0 +1,180 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.transformValues;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.util.DateTimeUtils.END_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.earliestOf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.util.Clock;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import org.joda.time.DateTime;
import java.util.List;
import java.util.Map.Entry;
import javax.inject.Inject;
/**
* Implementation of the procedure for determining point-in-time consistent commit log checkpoint.
*
* <p>This algorithm examines the recently written commit log data and uses a dual-read approach
* to determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
* "consistent" we mean, generally speaking, that if the datastore were restored by replaying all
* the commit logs up to the checkpoint times of the buckets, the result would be transactionally
* correct; there must be no "holes" where restored state depends on non-restored state.
*
* <p>The consistency guarantee really has two parts, only one of which is provided by this
* algorithm. The procedure below guarantees only that if the resulting checkpoint includes any
* given commit log, it will also include all the commit logs that were both 1) actually written
* before that commit log "in real life", and 2) have an earlier timestamp than that commit log.
* (These criteria do not necessarily imply each other, due to the lack of a global shared clock.)
* The rest of the guarantee comes from our Ofy customizations, which ensure that any transaction
* that depends on state from a previous transaction does indeed have a later timestamp.
*
* <h2>Procedure description</h2>
* <pre>
* {@code
* ComputeCheckpoint() -> returns a set consisting of a timestamp c(b_i) for every bucket b_i
*
* 1) read off the latest commit timestamp t(b_i) for every bucket b_i
* 2) iterate over the buckets b_i a second time, and
* a) do a consistent query for the next commit timestamp t'(b_i) where t'(b_i) > t(b_i)
* b) if present, add this timestamp t'(b_i) to a set S
* 3) compute a threshold time T* representing a time before all commits in S, as follows:
* a) if S is empty, let T* = +∞ (or the "end of time")
* b) else, let T* = T - Δ, for T = min(S) and some small Δ > 0
* 4) return the set given by: min(t(b_i), T*) for all b_i
* }
* </pre>
*
* <h2>Correctness proof of algorithm</h2>
*
* <p>{@literal
* As described above, the algorithm is correct as long as it can ensure the following: given a
* commit log X written at time t(X) to bucket b_x, and another commit log Y that was written "in
* real life" before X and for which t(Y) < t(X), then if X is included in the checkpoint, so is Y;
* that is, t(X) <= c(b_x) implies t(Y) <= c(b_y).
* }
*
* <p>{@literal
* To prove this, first note that we always have c(b_i) <= t(b_i) for every b_i, i.e. every commit
* log included in the checkpoint must have been seen in the first pass. Hence if X was included,
* then X must have been written by the time we started the second pass. But since Y was written
* "in real life" prior to X, we must have seen Y by the second pass too.
* }
*
* <p>{@literal
* Now assume towards a contradiction that X is indeed included but Y is not, i.e. that we have
* t(X) <= c(b_x) but t(Y) > c(b_y). If Y was seen in the first pass, i.e. t(Y) <= t(b_y), then by
* our assumption c(b_y) < t(Y) <= t(b_y), and therefore c(b_y) != t(b_y). By the definition of
* c(b_y) it must then equal T*, so we have T* < t(Y). However, this is a contradiction since
* t(Y) < t(X) and t(X) <= c(b_x) <= T*. If instead Y was seen in the second pass but not the
* first, t'(b_y) exists and we must have t'(b_y) <= t(Y), but then since T* < T <= t'(b_y) by
* definition, we again reach the contradiction T* < t(Y).
* }
*/
class CommitLogCheckpointStrategy {
@Inject Ofy ofy;
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy() {}
/** Compute and return a new CommitLogCheckpoint for the current point in time. */
public CommitLogCheckpoint computeCheckpoint() {
DateTime checkpointTime = clock.nowUtc();
ImmutableMap<Integer, DateTime> firstPassTimes = readBucketTimestamps();
DateTime threshold = readNewCommitLogsAndFindThreshold(firstPassTimes);
return CommitLogCheckpoint.create(
checkpointTime,
computeBucketCheckpointTimes(firstPassTimes, threshold));
}
/**
* Returns a map from all bucket IDs to their current last written time values, fetched without
* a transaction so with no guarantee of consistency across buckets.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> readBucketTimestamps() {
// Use a fresh session cache so that we get the latest data from datastore.
return ofy.doWithFreshSessionCache(new Work<ImmutableMap<Integer, DateTime>>() {
@Override
public ImmutableMap<Integer, DateTime> run() {
ImmutableMap.Builder<Integer, DateTime> results = new ImmutableMap.Builder<>();
for (CommitLogBucket bucket : CommitLogBucket.loadAllBuckets()) {
results.put(bucket.getBucketNum(), bucket.getLastWrittenTime());
}
return results.build();
}});
}
/**
* Returns a threshold value defined as the latest timestamp that is before all new commit logs,
* where "new" means having a commit time after the per-bucket timestamp in the given map.
* When no such commit logs exist, the threshold value is set to END_OF_TIME.
*/
@VisibleForTesting
DateTime readNewCommitLogsAndFindThreshold(ImmutableMap<Integer, DateTime> bucketTimes) {
DateTime timeBeforeAllNewCommits = END_OF_TIME;
for (Entry<Integer, DateTime> entry : bucketTimes.entrySet()) {
Key<CommitLogBucket> bucketKey = getBucketKey(entry.getKey());
DateTime bucketTime = entry.getValue();
// Add 1 to handle START_OF_TIME since 0 isn't a valid id - filter then uses >= instead of >.
Key<CommitLogManifest> keyForFilter =
Key.create(CommitLogManifest.create(bucketKey, bucketTime.plusMillis(1), null));
List<Key<CommitLogManifest>> manifestKeys =
ofy.load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", keyForFilter)
.limit(1)
.keys()
.list();
if (!manifestKeys.isEmpty()) {
timeBeforeAllNewCommits = earliestOf(
timeBeforeAllNewCommits,
CommitLogManifest.extractCommitTime(getOnlyElement(manifestKeys)).minusMillis(1));
}
}
return timeBeforeAllNewCommits;
}
/**
* Returns the bucket checkpoint times produced by clamping the given set of bucket timestamps to
* at most the given threshold value.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> computeBucketCheckpointTimes(
ImmutableMap<Integer, DateTime> firstPassTimes,
final DateTime threshold) {
return ImmutableMap.copyOf(transformValues(firstPassTimes, new Function<DateTime, DateTime>() {
@Override
public DateTime apply(DateTime firstPassTime) {
return earliestOf(firstPassTime, threshold);
}}));
}
}
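
A worked toy example of steps 1-4 from the class Javadoc, with invented bucket timestamps (plain longs standing in for DateTimes):

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class CheckpointExampleSketch {
      public static void main(String[] args) {
        // 1) First pass: latest commit time seen per bucket.
        Map<Integer, Long> firstPass = new LinkedHashMap<>();
        firstPass.put(1, 10L);
        firstPass.put(2, 12L);
        firstPass.put(3, 12L);
        // 2) Second pass: commits that appeared after the first pass (the set S).
        List<Long> s = Collections.singletonList(11L);  // bucket 1 got a commit at t=11
        // 3) Threshold T* = min(S) - 1, or "end of time" when S is empty.
        long threshold = s.isEmpty() ? Long.MAX_VALUE : Collections.min(s) - 1;  // 10
        // 4) Clamp each first-pass time to T*.
        for (Map.Entry<Integer, Long> e : firstPass.entrySet()) {
          System.out.println("bucket " + e.getKey() + " -> " + Math.min(e.getValue(), threshold));
        }
        // All buckets print 10: the straggler at t=11 pulls the checkpoint back,
        // so no bucket's checkpoint includes state the others might be missing.
      }
    }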

@@ -0,0 +1,160 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.request.Action.Method.POST;
import com.google.common.base.Function;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import com.googlecode.objectify.cmd.Loader;
import com.googlecode.objectify.cmd.Query;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.util.List;
import javax.inject.Inject;
/**
* Task that garbage collects old {@link CommitLogManifest} entities.
*
* <p>Once commit logs have been written to GCS, we don't really need them in datastore anymore,
* except to reconstruct point-in-time snapshots of the database. But that functionality is not
* useful after a certain amount of time, e.g. thirty days. So this task runs periodically to delete
* the old data.
*
* <p>This task should be invoked in a fanout style for each {@link CommitLogBucket} ID. It then
* queries {@code CommitLogManifest} entities older than the threshold, using an ancestor query
* operating under the assumption that the ID is the transaction timestamp in
* milliseconds since the UNIX epoch. It then deletes them inside a transaction, along with their
* associated {@link CommitLogMutation} entities.
*
* <p>If additional data is left over, we show a warning at the INFO level, because it's not
* actionable. If anything, it just shows that the system was under high load thirty days ago, and
* therefore serves little use as an early warning to increase the number of buckets.
*
* <p>Before running, this task will perform an eventually consistent count query outside of a
* transaction to see how much data actually exists to delete. If it's less than a tenth of
* {@link #maxDeletes}, then we don't bother running the task. This is to minimize contention on the
* bucket and avoid wasting resources.
*
* <h3>Dimensioning</h3>
*
* <p>This entire operation runs on a single entity group, within a single transaction. Since
* there's a 10 MB upper bound on transaction size and a four-minute time limit, we can only delete
* so many commit logs at once. So given the above constraints, five hundred would make a safe
* default value for {@code maxDeletes}. See {@linkplain
* com.google.domain.registry.config.ConfigModule#provideCommitLogMaxDeletes() commitLogMaxDeletes}
* for further documentation on this matter.
*
* <p>Finally, we need to pick an appropriate cron interval time for this task. Since a bucket
* represents a single datastore entity group, it's only guaranteed to have one transaction per
* second. So we just need to divide {@code maxDeletes} by sixty to get an appropriate minute
* interval. Assuming {@code maxDeletes} is five hundred, this rounds up to ten minutes, which we'll
* double, since this task can always catch up in off-peak hours.
*
* <p>There's little harm in keeping the data around a little longer, since this task is engaged in
* a zero-sum resource struggle with the EPP transactions. Each transaction we perform here is one
* less transaction that's available to EPP. Furthermore, a well-administered system should have
* enough buckets that we'll never brush up against the 1/s entity group transaction SLA.
*/
@Action(path = "/_dr/task/deleteOldCommitLogs", method = POST, automaticallyPrintOk = true)
public final class DeleteOldCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject Clock clock;
@Inject Ofy ofy;
@Inject @Parameter("bucket") int bucketNum;
@Inject @Config("commitLogDatastoreRetention") Duration maxAge;
@Inject @Config("commitLogMaxDeletes") int maxDeletes;
@Inject DeleteOldCommitLogsAction() {}
@Override
public void run() {
if (!doesEnoughDataExistThatThisTaskIsWorthRunning()) {
return;
}
Integer deleted = ofy.transact(new Work<Integer>() {
@Override
public Integer run() {
// Load at most maxDeletes manifest keys of commit logs older than the deletion threshold.
List<Key<CommitLogManifest>> manifestKeys =
queryManifests(ofy.load())
.limit(maxDeletes)
.keys()
.list();
// transform() is lazy so copyOf() ensures all the subqueries happen in parallel, because
// the queries are launched by iterable(), put into a list, and then the list of iterables
// is consumed and concatenated.
ofy.deleteWithoutBackup().keys(concat(copyOf(transform(manifestKeys,
new Function<Key<CommitLogManifest>, Iterable<Key<CommitLogMutation>>>() {
@Override
public Iterable<Key<CommitLogMutation>> apply(Key<CommitLogManifest> manifestKey) {
return ofy.load()
.type(CommitLogMutation.class)
.ancestor(manifestKey)
.keys()
.iterable(); // launches the query asynchronously
}}))));
ofy.deleteWithoutBackup().keys(manifestKeys);
return manifestKeys.size();
}});
if (deleted == maxDeletes) {
logger.infofmt("Additional old commit logs might exist in bucket %d", bucketNum);
}
}
/** Returns the cutoff time; commit logs older than this point will be deleted. */
private DateTime getDeletionThreshold() {
return clock.nowUtc().minus(maxAge);
}
private boolean doesEnoughDataExistThatThisTaskIsWorthRunning() {
int tenth = Math.max(1, maxDeletes / 10);
int count = queryManifests(ofy.loadEventuallyConsistent())
.limit(tenth)
.count();
if (0 < count && count < tenth) {
logger.infofmt("Not enough old commit logs to bother running: %d < %d", count, tenth);
}
return count >= tenth;
}
private Query<CommitLogManifest> queryManifests(Loader loader) {
long thresholdMillis = getDeletionThreshold().getMillis();
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return loader
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey("<", Key.create(bucketKey, CommitLogManifest.class, thresholdMillis));
}
}
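
A back-of-the-envelope check of the cron-interval arithmetic from the Javadoc above, assuming maxDeletes = 500 and the one-transaction-per-second entity group limit:

    class CronIntervalSketch {
      public static void main(String[] args) {
        int maxDeletes = 500;
        int minutesToDrain = (int) Math.ceil(maxDeletes / 60.0);  // ceil(8.33) = 9
        int rounded = ((minutesToDrain + 9) / 10) * 10;           // round up to 10
        System.out.println("cron interval: " + (2 * rounded) + " minutes");  // doubled: 20
      }
    }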

@@ -0,0 +1,219 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Lists.partition;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.NUM_TRANSACTIONS;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.UPPER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.serializeEntity;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isAtOrAfter;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import static java.nio.channels.Channels.newOutputStream;
import static java.util.Arrays.asList;
import com.google.appengine.tools.cloudstorage.GcsFileOptions;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import javax.inject.Inject;
/** Action that exports the diff between two commit log checkpoints to GCS. */
@Action(
path = ExportCommitLogDiffAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public final class ExportCommitLogDiffAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
static final String PATH = "/_dr/task/exportCommitLogDiff";
static final String UPPER_CHECKPOINT_TIME_PARAM = "upperCheckpointTime";
static final String LOWER_CHECKPOINT_TIME_PARAM = "lowerCheckpointTime";
public static final String DIFF_FILE_PREFIX = "commit_diff_until_";
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Config("commitLogDiffExportBatchSize") int batchSize;
@Inject @Parameter(LOWER_CHECKPOINT_TIME_PARAM) DateTime lowerCheckpointTime;
@Inject @Parameter(UPPER_CHECKPOINT_TIME_PARAM) DateTime upperCheckpointTime;
@Inject ExportCommitLogDiffAction() {}
@Override
public void run() {
checkArgument(isAtOrAfter(lowerCheckpointTime, START_OF_TIME));
checkArgument(lowerCheckpointTime.isBefore(upperCheckpointTime));
// Load the boundary checkpoints - lower is exclusive and may not exist (on the first export,
// when lowerCheckpointTime is START_OF_TIME), whereas the upper is inclusive and must exist.
CommitLogCheckpoint lowerCheckpoint = lowerCheckpointTime.isAfter(START_OF_TIME)
? verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(lowerCheckpointTime)).now())
: null;
CommitLogCheckpoint upperCheckpoint =
verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(upperCheckpointTime)).now());
// Load the keys of all the manifests to include in this diff.
List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
logger.infofmt("Found %d manifests to export", sortedKeys.size());
// Open an output channel to GCS, wrapped in a stream for convenience.
try (OutputStream gcsStream = newOutputStream(gcsService.createOrReplace(
new GcsFilename(gcsBucket, DIFF_FILE_PREFIX + upperCheckpointTime),
new GcsFileOptions.Builder()
.addUserMetadata(LOWER_BOUND_CHECKPOINT, lowerCheckpointTime.toString())
.addUserMetadata(UPPER_BOUND_CHECKPOINT, upperCheckpointTime.toString())
.addUserMetadata(NUM_TRANSACTIONS, Integer.toString(sortedKeys.size()))
.build()))) {
// Export the upper checkpoint itself.
serializeEntity(upperCheckpoint, gcsStream);
// If there are no manifests to export, stop early, now that we've written out the file with
// the checkpoint itself (which is needed for restores, even if it's empty).
if (sortedKeys.isEmpty()) {
return;
}
// Export to GCS in chunks, one per fixed batch of commit logs. While processing one batch,
// asynchronously load the entities for the next one.
List<List<Key<CommitLogManifest>>> keyChunks = partition(sortedKeys, batchSize);
// Objectify's map return type is asynchronous. Calling .values() will block until it loads.
Map<?, CommitLogManifest> nextChunkToExport = ofy().load().keys(keyChunks.get(0));
for (int i = 0; i < keyChunks.size(); i++) {
// Force the async load to finish.
Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
logger.infofmt("Loaded %d manifests", chunkValues.size());
// Since there is no hard bound on how much data this might be, take care not to let the
// Objectify session cache fill up and potentially run out of memory. This is the only safe
// point to do this since at this point there is no async load in progress.
ofy().clearSessionCache();
// Kick off the next async load, which can happen in parallel to the current GCS export.
if (i + 1 < keyChunks.size()) {
nextChunkToExport = ofy().load().keys(keyChunks.get(i + 1));
}
exportChunk(gcsStream, chunkValues);
logger.infofmt("Exported %d manifests", chunkValues.size());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
logger.infofmt("Exported %d manifests in total", sortedKeys.size());
}
/**
* Loads all the diff keys, sorted in a transaction-consistent chronological order.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
*/
private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
@Nullable final CommitLogCheckpoint lowerCheckpoint,
final CommitLogCheckpoint upperCheckpoint) {
// Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
// transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
// CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
// a deterministic order.
return FluentIterable.from(upperCheckpoint.getBucketTimestamps().keySet())
.transformAndConcat(new Function<Integer, Iterable<Key<CommitLogManifest>>>() {
@Override
public Iterable<Key<CommitLogManifest>> apply(Integer bucketNum) {
return loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum);
}})
.toSortedList(new Comparator<Key<CommitLogManifest>>() {
@Override
public int compare(Key<CommitLogManifest> a, Key<CommitLogManifest> b) {
// Compare keys by timestamp (which is encoded in the id as millis), then by bucket id.
return ComparisonChain.start()
.compare(a.getId(), b.getId())
.compare(a.getParent().getId(), b.getParent().getId())
.result();
}});
}
/**
* Loads the diff keys for one bucket.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
* @param bucketNum the bucket to load diff keys from
*/
private Iterable<Key<CommitLogManifest>> loadDiffKeysFromBucket(
@Nullable CommitLogCheckpoint lowerCheckpoint,
CommitLogCheckpoint upperCheckpoint,
int bucketNum) {
// If no lower checkpoint exists, use START_OF_TIME as the effective exclusive lower bound.
DateTime lowerCheckpointBucketTime = lowerCheckpoint == null
? START_OF_TIME
: lowerCheckpoint.getBucketTimestamps().get(bucketNum);
// Since START_OF_TIME=0 is not a valid id in a key, add 1 to both bounds. Then instead of
// loading lowerBound < x <= upperBound, we can load lowerBound <= x < upperBound.
DateTime lowerBound = lowerCheckpointBucketTime.plusMillis(1);
DateTime upperBound = upperCheckpoint.getBucketTimestamps().get(bucketNum).plusMillis(1);
// If the lower and upper bounds are equal, there can't be any results, so skip the query.
if (lowerBound.equals(upperBound)) {
return ImmutableSet.of();
}
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return ofy().load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", CommitLogManifest.createKey(bucketKey, lowerBound))
.filterKey("<", CommitLogManifest.createKey(bucketKey, upperBound))
.keys();
}
/** Writes a chunk's worth of manifests and associated mutations to GCS. */
private void exportChunk(OutputStream gcsStream, Collection<CommitLogManifest> chunk)
throws IOException {
// Kick off async loads for all the manifests in the chunk.
ImmutableList.Builder<Iterable<? extends ImmutableObject>> entities =
new ImmutableList.Builder<>();
for (CommitLogManifest manifest : chunk) {
entities.add(asList(manifest));
entities.add(ofy().load().type(CommitLogMutation.class).ancestor(manifest));
}
for (ImmutableObject entity : concat(entities.build())) {
serializeEntity(entity, gcsStream);
}
}
}
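
The load-ahead loop in run() follows a general prefetch pattern: process the current chunk while the next one loads. A stripped-down sketch with plain Futures standing in for Objectify's async loads (loadBatch() and the data are invented):

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    class PipelinedExportSketch {
      private static final ExecutorService executor = Executors.newSingleThreadExecutor();

      static Future<List<String>> loadBatch(final List<String> keys) {
        return executor.submit(new Callable<List<String>>() {
          @Override
          public List<String> call() {
            return keys;  // a real loader would fetch the entities for these keys
          }});
      }

      public static void main(String[] args) throws Exception {
        List<List<String>> chunks = Arrays.asList(
            Arrays.asList("m1", "m2"), Arrays.asList("m3", "m4"), Arrays.asList("m5"));
        Future<List<String>> next = loadBatch(chunks.get(0));
        for (int i = 0; i < chunks.size(); i++) {
          List<String> current = next.get();  // block until this chunk is ready
          if (i + 1 < chunks.size()) {
            next = loadBatch(chunks.get(i + 1));  // prefetch in parallel with the export
          }
          System.out.println("exporting " + current);  // stand-in for exportChunk()
        }
        executor.shutdown();
      }
    }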

@@ -0,0 +1,128 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkState;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.DIFF_FILE_PREFIX;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.DateTimeUtils.latestOf;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.appengine.tools.cloudstorage.ListItem;
import com.google.appengine.tools.cloudstorage.ListOptions;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.backup.BackupModule.Backups;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.DateTime;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Utility class to list commit logs diff files stored on GCS. */
class GcsDiffFileLister {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Backups ListeningExecutorService executor;
@Inject GcsDiffFileLister() {}
List<GcsFileMetadata> listDiffFiles(DateTime fromTime) {
logger.info("Requested restore from time: " + fromTime);
// List all of the diff files on GCS and build a map from each file's upper checkpoint time
// (extracted from the filename) to its asynchronously-loaded metadata, keeping only files with
// an upper checkpoint time > fromTime.
Map<DateTime, ListenableFuture<GcsFileMetadata>> upperBoundTimesToMetadata = new HashMap<>();
Iterator<ListItem> listItems;
try {
// TODO(b/23554360): Use a smarter prefixing strategy to speed this up.
listItems = gcsService.list(
gcsBucket,
new ListOptions.Builder().setPrefix(DIFF_FILE_PREFIX).build());
} catch (IOException e) {
throw new RuntimeException(e);
}
DateTime lastUpperBoundTime = START_OF_TIME;
while (listItems.hasNext()) {
final String filename = listItems.next().getName();
DateTime upperBoundTime = DateTime.parse(filename.substring(DIFF_FILE_PREFIX.length()));
if (isBeforeOrAt(fromTime, upperBoundTime)) {
upperBoundTimesToMetadata.put(upperBoundTime, executor.submit(
new Callable<GcsFileMetadata>() {
@Override
public GcsFileMetadata call() throws Exception {
return getMetadata(filename);
}}));
}
lastUpperBoundTime = latestOf(upperBoundTime, lastUpperBoundTime);
}
if (upperBoundTimesToMetadata.isEmpty()) {
logger.info("No files found");
return ImmutableList.of();
}
// GCS file listing is eventually consistent, so it's possible that we are missing a file. The
// metadata of a file is sufficient to identify the preceding file, so if we start from the
// last file and work backwards we can verify that we have no holes in our chain (although we
// may be missing files at the end).
ImmutableList.Builder<GcsFileMetadata> filesBuilder = new ImmutableList.Builder<>();
logger.info("Restoring until: " + lastUpperBoundTime);
DateTime checkpointTime = lastUpperBoundTime;
while (checkpointTime.isAfter(fromTime)) {
GcsFileMetadata metadata;
if (upperBoundTimesToMetadata.containsKey(checkpointTime)) {
metadata = Futures.getUnchecked(upperBoundTimesToMetadata.get(checkpointTime));
} else {
String filename = DIFF_FILE_PREFIX + checkpointTime;
logger.info("Patching GCS list; discovered file " + filename);
metadata = getMetadata(filename);
checkState(metadata != null, "Could not read metadata for file %s", filename);
}
filesBuilder.add(metadata);
checkpointTime = getLowerBoundTime(metadata);
}
ImmutableList<GcsFileMetadata> files = filesBuilder.build().reverse();
logger.info("Actual restore from time: " + getLowerBoundTime(files.get(0)));
logger.infofmt("Found %d files to restore", files.size());
return files;
}
private DateTime getLowerBoundTime(GcsFileMetadata metadata) {
return DateTime.parse(metadata.getOptions().getUserMetadata().get(LOWER_BOUND_CHECKPOINT));
}
private GcsFileMetadata getMetadata(String filename) {
try {
return gcsService.getMetadata(new GcsFilename(gcsBucket, filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
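
A toy model of the backwards chain walk: each file's metadata names its predecessor's upper bound, so walking from the newest file back to fromTime proves the chain has no holes. All timestamps are invented:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    class ChainWalkSketch {
      public static void main(String[] args) {
        Map<Long, Long> lowerBoundOf = new HashMap<>();  // upper bound -> lower bound
        lowerBoundOf.put(30L, 20L);
        lowerBoundOf.put(20L, 10L);
        lowerBoundOf.put(10L, 0L);
        long fromTime = 0L;
        long cursor = 30L;  // latest upper bound seen in the (possibly stale) listing
        Deque<Long> restoreOrder = new ArrayDeque<>();
        while (cursor > fromTime) {
          Long lower = lowerBoundOf.get(cursor);
          if (lower == null) {
            throw new IllegalStateException("hole in the commit log chain at " + cursor);
          }
          restoreOrder.addFirst(cursor);  // reverse into chronological order
          cursor = lower;
        }
        System.out.println("restore order: " + restoreOrder);  // [10, 20, 30]
      }
    }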

@@ -0,0 +1,207 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Iterators.peekingIterator;
import static com.google.domain.registry.backup.BackupUtils.createDeserializingIterator;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static java.util.Arrays.asList;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.Retrier;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Result;
import com.googlecode.objectify.util.ResultNow;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Restore Registry 2 commit logs from GCS to datastore. */
@Action(
path = RestoreCommitLogsAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public class RestoreCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final int BLOCK_SIZE = 1024 * 1024; // Buffer 1 MB at a time, for no particular reason.
static final String PATH = "/_dr/task/restoreCommitLogs";
static final String DRY_RUN_PARAM = "dryRun";
static final String FROM_TIME_PARAM = "fromTime";
@Inject GcsService gcsService;
@Inject @Parameter(DRY_RUN_PARAM) boolean dryRun;
@Inject @Parameter(FROM_TIME_PARAM) DateTime fromTime;
@Inject DatastoreService datastoreService;
@Inject GcsDiffFileLister diffLister;
@Inject Retrier retrier;
@Inject RestoreCommitLogsAction() {}
@Override
public void run() {
checkArgument( // safety
RegistryEnvironment.get() == RegistryEnvironment.ALPHA
|| RegistryEnvironment.get() == RegistryEnvironment.UNITTEST,
"DO NOT RUN ANYWHERE ELSE EXCEPT ALPHA OR TESTS.");
if (dryRun) {
logger.info("Running in dryRun mode");
}
List<GcsFileMetadata> diffFiles = diffLister.listDiffFiles(fromTime);
if (diffFiles.isEmpty()) {
logger.info("Nothing to restore");
return;
}
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
CommitLogCheckpoint lastCheckpoint = null;
for (GcsFileMetadata metadata : diffFiles) {
logger.info("Restoring: " + metadata.getFilename().getObjectName());
try (InputStream input = Channels.newInputStream(
gcsService.openPrefetchingReadChannel(metadata.getFilename(), 0, BLOCK_SIZE))) {
PeekingIterator<ImmutableObject> commitLogs =
peekingIterator(createDeserializingIterator(input));
lastCheckpoint = (CommitLogCheckpoint) commitLogs.next();
saveOfy(asList(lastCheckpoint)); // Save the checkpoint itself.
while (commitLogs.hasNext()) {
CommitLogManifest manifest = restoreOneTransaction(commitLogs);
bucketTimestamps.put(manifest.getBucketId(), manifest.getCommitTime());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
// Restore the CommitLogCheckpointRoot and CommitLogBuckets.
saveOfy(FluentIterable.from(bucketTimestamps.entrySet())
.transform(new Function<Entry<Integer, DateTime>, ImmutableObject>() {
@Override
public ImmutableObject apply(Entry<Integer, DateTime> entry) {
return new CommitLogBucket.Builder()
.setBucketNum(entry.getKey())
.setLastWrittenTime(entry.getValue())
.build();
}})
.append(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())));
}
/**
* Restores the contents of one transaction to datastore.
*
* <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
* object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
* restore by deleting the deletes and recreating the saves from their proto form. We also save
* the commit logs themselves back to datastore, so that the commit log system itself is
* transparently restored alongside the data.
*
* @return the manifest, for use in restoring the {@link CommitLogBucket}.
*/
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
Result<?> deleteResult = deleteAsync(manifest.getDeletions());
List<Entity> entitiesToSave = Lists.newArrayList(ofy().save().toEntity(manifest));
while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
entitiesToSave.add(ofy().save().toEntity(mutation));
entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
}
saveRaw(entitiesToSave);
try {
deleteResult.now();
} catch (Exception e) {
retry(new Runnable() {
@Override
public void run() {
deleteAsync(manifest.getDeletions()).now();
}});
}
return manifest;
}
private void saveRaw(final List<Entity> entitiesToSave) {
if (dryRun) {
logger.info("Would have saved " + entitiesToSave);
return;
}
retry(new Runnable() {
@Override
public void run() {
datastoreService.put(entitiesToSave);
}});
}
private void saveOfy(final Iterable<? extends ImmutableObject> objectsToSave) {
if (dryRun) {
logger.info("Would have saved " + asList(objectsToSave));
return;
}
retry(new Runnable() {
@Override
public void run() {
ofy().saveWithoutBackup().entities(objectsToSave).now();
}});
}
private Result<?> deleteAsync(Set<Key<?>> keysToDelete) {
if (dryRun) {
logger.info("Would have deleted " + keysToDelete);
}
return dryRun || keysToDelete.isEmpty()
? new ResultNow<Void>(null)
: ofy().deleteWithoutBackup().entities(keysToDelete);
}
/** Retrier for saves and deletes, since we can't proceed with any failures. */
private void retry(final Runnable runnable) {
retrier.callWithRetry(
new Callable<Void>() {
@Override
public Void call() throws Exception {
runnable.run();
return null;
}},
RuntimeException.class);
}
}
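
For reference, a sketch of the record layout that restoreOneTransaction() consumes: one checkpoint, then one [manifest, mutation...] group per original transaction. Plain strings stand in for the entity classes; the same Guava PeekingIterator is used:

    import com.google.common.collect.Iterators;
    import com.google.common.collect.PeekingIterator;
    import java.util.Arrays;

    class DiffFileLayoutSketch {
      public static void main(String[] args) {
        PeekingIterator<String> logs = Iterators.peekingIterator(Arrays.asList(
            "checkpoint",
            "manifest:m1", "mutation:m1/a",
            "manifest:m2", "mutation:m2/a", "mutation:m2/b").iterator());
        System.out.println("save " + logs.next());  // the checkpoint always comes first
        while (logs.hasNext()) {
          String manifest = logs.next();  // starts a new transaction's records
          System.out.println("replay deletes listed in " + manifest);
          while (logs.hasNext() && logs.peek().startsWith("mutation:")) {
            System.out.println("  re-save " + logs.next());
          }
        }
      }
    }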

@@ -0,0 +1,16 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.backup;

@@ -0,0 +1,30 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "bigquery",
srcs = glob(["*.java"]),
deps = [
"//apiserving/discoverydata/bigquery:bigqueryv2",
"//java/com/google/api/client/extensions/appengine/http",
"//java/com/google/api/client/googleapis/auth/oauth2",
"//java/com/google/api/client/googleapis/extensions/appengine/auth/oauth2",
"//java/com/google/api/client/googleapis/json",
"//java/com/google/api/client/http",
"//java/com/google/api/client/json",
"//java/com/google/api/client/json/jackson2",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/io",
"//java/com/google/common/util/concurrent",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

@@ -0,0 +1,775 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.base.Verify.verify;
import static com.google.domain.registry.bigquery.BigqueryUtils.toJobReferenceString;
import static org.joda.time.DateTimeZone.UTC;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.client.http.AbstractInputStreamContent;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.GetQueryResultsResponse;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobConfiguration;
import com.google.api.services.bigquery.model.JobConfigurationExtract;
import com.google.api.services.bigquery.model.JobConfigurationLoad;
import com.google.api.services.bigquery.model.JobConfigurationQuery;
import com.google.api.services.bigquery.model.JobReference;
import com.google.api.services.bigquery.model.JobStatistics;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableCell;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableRow;
import com.google.api.services.bigquery.model.ViewDefinition;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableTable;
import com.google.common.io.BaseEncoding;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.domain.registry.bigquery.BigqueryUtils.DestinationFormat;
import com.google.domain.registry.bigquery.BigqueryUtils.SourceFormat;
import com.google.domain.registry.bigquery.BigqueryUtils.TableType;
import com.google.domain.registry.bigquery.BigqueryUtils.WriteDisposition;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.domain.registry.util.Sleeper;
import com.google.domain.registry.util.SqlTemplate;
import com.google.domain.registry.util.SystemSleeper;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
/** Class encapsulating parameters and state for accessing the Bigquery API. */
public class BigqueryConnection implements AutoCloseable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
private static final Duration MIN_POLL_INTERVAL = Duration.millis(500);
@NonFinalForTesting
private static Sleeper sleeper = new SystemSleeper();
  /** Default name of the dataset to use for requests to the API. */
public static final String DEFAULT_DATASET_NAME = "testing";
/** Default dataset to use for storing temporary tables. */
private static final String TEMP_DATASET_NAME = "__temp__";
/** Default time to live for temporary tables. */
private static final Duration TEMP_TABLE_TTL = Duration.standardHours(24);
/** Bigquery client instance wrapped by this class. */
private Bigquery bigquery;
/** Executor service for bigquery jobs. */
private ListeningExecutorService service;
/** Credential object to use for initializing HTTP requests to the bigquery API. */
private HttpRequestInitializer credential;
/** HTTP transport object to use for accessing bigquery API. */
private HttpTransport httpTransport;
/** JSON factory object to use for accessing bigquery API. */
private JsonFactory jsonFactory;
/** Pseudo-randomness source to use for creating random table names. */
private Random random = new Random();
/** Name of the default dataset to use for inserting tables. */
private String datasetId = DEFAULT_DATASET_NAME;
/** Whether to automatically overwrite existing tables and views. */
private boolean overwrite = false;
/** Duration to wait between polls for job status. */
private Duration pollInterval = Duration.millis(1000);
/** Builder for a {@link BigqueryConnection}, since the latter is immutable once created. */
public static class Builder {
private BigqueryConnection instance;
public Builder() {
instance = new BigqueryConnection();
}
/**
* The BigqueryConnection takes ownership of this {@link ExecutorService} and will
* shut it down when the BigqueryConnection is closed.
*/
public Builder setExecutorService(ExecutorService executorService) {
instance.service = MoreExecutors.listeningDecorator(executorService);
return this;
}
public Builder setCredential(GoogleCredential credential) {
instance.credential = checkNotNull(credential);
instance.httpTransport = credential.getTransport();
instance.jsonFactory = credential.getJsonFactory();
return this;
}
public Builder setDatasetId(String datasetId) {
instance.datasetId = checkNotNull(datasetId);
return this;
}
public Builder setOverwrite(boolean overwrite) {
instance.overwrite = overwrite;
return this;
}
public Builder setPollInterval(Duration pollInterval) {
checkArgument(
!pollInterval.isShorterThan(MIN_POLL_INTERVAL),
"poll interval must be at least %ldms", MIN_POLL_INTERVAL.getMillis());
instance.pollInterval = pollInterval;
return this;
}
public BigqueryConnection build() {
try {
checkNotNull(instance.service, "Must provide executor service");
return instance;
} finally {
// Clear the internal instance so you can't accidentally mutate it through this builder.
instance = null;
}
}
}
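  // A minimal usage sketch (illustrative): "googleCredential" and the thread pool size
  // are hypothetical stand-ins for whatever the caller already has.
  //
  //   BigqueryConnection connection = new BigqueryConnection.Builder()
  //       .setExecutorService(java.util.concurrent.Executors.newFixedThreadPool(4))
  //       .setCredential(googleCredential)
  //       .setDatasetId("testing")
  //       .setOverwrite(true)
  //       .setPollInterval(Duration.millis(1000))
  //       .build()
  //       .initialize();  // initialize() is declared to throw Exception.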
/**
* Class that wraps a normal Bigquery API Table object to make it immutable from the client side
* and give it additional semantics as a "destination" for load or query jobs, with an overwrite
* flag set by the client upon creation.
* <p>
* Additionally provides encapsulation so that clients of BigqueryConnection don't need to take
* any direct dependencies on Bigquery API classes and can instead use DestinationTable.
*/
public static class DestinationTable {
/** The wrapped Bigquery API Table object. */
private final Table table;
/** The type of this table. */
private final TableType type;
/** The write disposition for jobs writing to this destination table. */
private final WriteDisposition writeDisposition;
/**
* A query to package with this table if the type is VIEW; not immutable but also not visible
* to clients.
*/
private String query;
/** A builder for DestinationTable. */
public static final class Builder {
private final Table table = new Table();
private final TableReference tableRef = new TableReference();
private TableType type = TableType.TABLE;
private WriteDisposition writeDisposition = WriteDisposition.WRITE_EMPTY;
public Builder datasetId(String datasetId) {
tableRef.setDatasetId(datasetId);
return this;
}
public Builder name(String name) {
tableRef.setTableId(name);
return this;
}
public Builder description(String description) {
table.setDescription(description);
return this;
}
public Builder type(TableType type) {
this.type = type;
return this;
}
public Builder timeToLive(Duration duration) {
this.table.setExpirationTime(new DateTime(UTC).plus(duration).getMillis());
return this;
}
public Builder overwrite(boolean overwrite) {
if (overwrite) {
this.writeDisposition = WriteDisposition.WRITE_TRUNCATE;
}
return this;
}
public Builder append(boolean append) {
if (append) {
this.writeDisposition = WriteDisposition.WRITE_APPEND;
}
return this;
}
public DestinationTable build() {
tableRef.setProjectId(getEnvironmentProjectId());
table.setTableReference(tableRef);
checkState(!isNullOrEmpty(table.getTableReference().getDatasetId()));
checkState(!isNullOrEmpty(table.getTableReference().getTableId()));
return new DestinationTable(this);
}
}
/** Constructs a new DestinationTable from its Builder. */
private DestinationTable(Builder b) {
table = b.table.clone();
type = b.type;
writeDisposition = b.writeDisposition;
}
/**
* Stores the provided query with this DestinationTable and returns it; used for packaging
* a query along with the DestinationTable before sending it to the table update logic.
*/
private DestinationTable withQuery(String query) {
checkState(type == TableType.VIEW);
this.query = query;
return this;
}
/** Returns a new copy of the Bigquery API Table object wrapped by this DestinationTable. */
private Table getTable() {
Table tableCopy = table.clone();
if (type == TableType.VIEW) {
tableCopy.setView(new ViewDefinition().setQuery(query));
}
return tableCopy;
}
/** Returns the write disposition that should be used for jobs writing to this table. */
private WriteDisposition getWriteDisposition() {
return writeDisposition;
}
/** Returns a new copy of the TableReference for the Table wrapped by this DestinationTable. */
private TableReference getTableReference() {
return table.getTableReference().clone();
}
/** Returns a string representation of the TableReference for the wrapped table. */
public String getStringReference() {
return tableReferenceToString(table.getTableReference());
}
/** Returns a string representation of the given TableReference. */
private static String tableReferenceToString(TableReference tableRef) {
return String.format(
"%s:%s.%s",
tableRef.getProjectId(),
tableRef.getDatasetId(),
tableRef.getTableId());
}
}
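  // Illustrative sketch of building a destination table by hand, rather than through the
  // buildDestinationTable() helper below (the dataset and table names are hypothetical):
  //
  //   DestinationTable dest = new DestinationTable.Builder()
  //       .datasetId("testing")
  //       .name("mytable")
  //       .type(TableType.TABLE)
  //       .timeToLive(Duration.standardHours(24))
  //       .overwrite(true)
  //       .build();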
/**
* Initializes the BigqueryConnection object by setting up the API client and creating the
* default dataset if it doesn't exist.
*/
public BigqueryConnection initialize() throws Exception {
bigquery = new Bigquery.Builder(httpTransport, jsonFactory, credential)
.setApplicationName(getClass().getSimpleName())
.build();
createDatasetIfNeeded(datasetId);
createDatasetIfNeeded(TEMP_DATASET_NAME);
return this;
}
/**
* Closes the BigqueryConnection object by shutting down the executor service. Clients
* should only call this after all ListenableFutures obtained from BigqueryConnection methods
* have resolved; this method does not block on their completion.
*/
@Override
public void close() {
service.shutdown();
}
/** Returns a partially built DestinationTable with the default dataset and overwrite behavior. */
public DestinationTable.Builder buildDestinationTable(String tableName) {
return new DestinationTable.Builder()
.datasetId(datasetId)
.type(TableType.TABLE)
.name(tableName)
.overwrite(overwrite);
}
/**
* Returns a partially built DestinationTable with a randomly generated name under the default
* temporary table dataset, with the default TTL and overwrite behavior.
*/
public DestinationTable.Builder buildTemporaryTable() {
return new DestinationTable.Builder()
.datasetId(TEMP_DATASET_NAME)
.type(TableType.TABLE)
.name(getRandomTableName())
.timeToLive(TEMP_TABLE_TTL)
.overwrite(overwrite);
}
/** Returns a random table name consisting only of the chars {@code [a-v0-9_]}. */
private String getRandomTableName() {
byte[] randBytes = new byte[8]; // 64 bits of randomness ought to be plenty.
random.nextBytes(randBytes);
return "_" + BaseEncoding.base32Hex().lowerCase().omitPadding().encode(randBytes);
}
/**
* A function that updates the specified Bigquery table to reflect the metadata from the input
* DestinationTable, passing the same DestinationTable through as the output. If the specified
* table does not already exist, it will be inserted into the dataset.
* <p>
* Clients can call this function directly to update a table on demand, or can pass it to
* Futures.transform() to update a table produced as the asynchronous result of a load or query
* job (e.g. to add a description to it).
*/
private class UpdateTableFunction implements Function<DestinationTable, DestinationTable> {
@Override
public DestinationTable apply(final DestinationTable destinationTable) {
Table table = destinationTable.getTable();
TableReference ref = table.getTableReference();
try {
if (checkTableExists(ref.getDatasetId(), ref.getTableId())) {
bigquery.tables()
.update(ref.getProjectId(), ref.getDatasetId(), ref.getTableId(), table)
.execute();
} else {
bigquery.tables()
.insert(ref.getProjectId(), ref.getDatasetId(), table)
.execute();
}
return destinationTable;
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
}
/**
* Starts an asynchronous load job to populate the specified destination table with the given
* source URIs and source format. Returns a ListenableFuture that holds the same destination
* table object on success.
*/
public ListenableFuture<DestinationTable> load(
DestinationTable dest,
SourceFormat sourceFormat,
Iterable<String> sourceUris) throws Exception {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setLoad(new JobConfigurationLoad()
.setWriteDisposition(dest.getWriteDisposition().toString())
.setSourceFormat(sourceFormat.toString())
.setSourceUris(ImmutableList.copyOf(sourceUris))
.setDestinationTable(dest.getTableReference())));
return Futures.transform(runJobToCompletion(job, dest), new UpdateTableFunction());
}
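  // Sketch of a typical load() call (the GCS URI is hypothetical); the returned future
  // resolves to the same DestinationTable once the load job and metadata update complete:
  //
  //   ListenableFuture<DestinationTable> loaded = connection.load(
  //       connection.buildDestinationTable("mytable").build(),
  //       SourceFormat.CSV,
  //       ImmutableList.of("gs://my-bucket/data.csv"));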
/**
* Starts an asynchronous query job to populate the specified destination table with the results
* of the specified query, or if the table is a view, to update the view to reflect that query.
* Returns a ListenableFuture that holds the same destination table object on success.
*/
public ListenableFuture<DestinationTable> query(
String querySql,
DestinationTable dest) {
if (dest.type == TableType.VIEW) {
// Use Futures.transform() rather than calling apply() directly so that any exceptions thrown
// by calling UpdateTableFunction will be propagated on the get() call, not from here.
return Futures.transform(
Futures.immediateFuture(dest.withQuery(querySql)), new UpdateTableFunction());
} else {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())
.setWriteDisposition(dest.getWriteDisposition().toString())
.setDestinationTable(dest.getTableReference())));
return Futures.transform(runJobToCompletion(job, dest), new UpdateTableFunction());
}
}
/**
* Starts an asynchronous query job to dump the results of the specified query into a local
* ImmutableTable object, row-keyed by the row number (indexed from 1), column-keyed by the
* TableFieldSchema for that column, and with the value object as the cell value. Note that null
* values will not actually be null, but they can be checked for using Data.isNull().
* <p>
* Returns a ListenableFuture that holds the ImmutableTable on success.
*/
public ListenableFuture<ImmutableTable<Integer, TableFieldSchema, Object>>
queryToLocalTable(String querySql) throws Exception {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())));
return Futures.transform(
runJobToCompletion(job),
new Function<Job, ImmutableTable<Integer, TableFieldSchema, Object>>() {
@Override
public ImmutableTable<Integer, TableFieldSchema, Object> apply(Job job) {
return getQueryResults(job);
}});
}
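  // Sketch of consuming the result (the query is hypothetical). Cells for SQL NULL hold a
  // sentinel checkable via Data.isNull(), not a real Java null:
  //
  //   ImmutableTable<Integer, TableFieldSchema, Object> result =
  //       connection.queryToLocalTable("SELECT * FROM [testing.mytable]").get();
  //   for (com.google.common.collect.Table.Cell<Integer, TableFieldSchema, Object> cell
  //       : result.cellSet()) {
  //     System.out.printf("row %d: %s = %s%n",
  //         cell.getRowKey(), cell.getColumnKey().getName(), cell.getValue());
  //   }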
/**
* Returns the query results for the given job as an ImmutableTable, row-keyed by row number
* (indexed from 1), column-keyed by the TableFieldSchema for that field, and with the value
* object as the cell value. Note that null values will not actually be null (since we're using
* ImmutableTable) but they can be checked for using Data.isNull().
* <p>
* This table is fully materialized in memory (not lazily loaded), so it should not be used with
* queries expected to return large results.
*/
private ImmutableTable<Integer, TableFieldSchema, Object> getQueryResults(Job job) {
try {
ImmutableTable.Builder<Integer, TableFieldSchema, Object> builder =
new ImmutableTable.Builder<>();
String pageToken = null;
int rowNumber = 1;
while (true) {
GetQueryResultsResponse queryResults = bigquery.jobs()
.getQueryResults(getProjectId(), job.getJobReference().getJobId())
.setPageToken(pageToken)
.execute();
// If the job isn't complete yet, retry; getQueryResults() waits for up to 10 seconds on
// each invocation so this will effectively poll for completion.
if (queryResults.getJobComplete()) {
List<TableFieldSchema> schemaFields = queryResults.getSchema().getFields();
for (TableRow row : queryResults.getRows()) {
Iterator<TableFieldSchema> fieldIterator = schemaFields.iterator();
Iterator<TableCell> cellIterator = row.getF().iterator();
while (fieldIterator.hasNext() && cellIterator.hasNext()) {
builder.put(rowNumber, fieldIterator.next(), cellIterator.next().getV());
}
rowNumber++;
}
pageToken = queryResults.getPageToken();
if (pageToken == null) {
break;
}
}
}
return builder.build();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Starts an asynchronous job to extract the specified source table and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
private ListenableFuture<String> extractTable(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
checkArgument(sourceTable.type == TableType.TABLE);
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setExtract(new JobConfigurationExtract()
.setSourceTable(sourceTable.getTableReference())
.setDestinationFormat(destinationFormat.toString())
.setDestinationUris(ImmutableList.of(destinationUri))
.setPrintHeader(printHeader)));
return runJobToCompletion(job, destinationUri);
}
/**
* Starts an asynchronous job to extract the specified source table or view and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extract(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
if (sourceTable.type == TableType.TABLE) {
return extractTable(sourceTable, destinationUri, destinationFormat, printHeader);
} else {
// We can't extract directly from a view, so instead extract from a query dumping that view.
return extractQuery(
SqlTemplate
.create("SELECT * FROM [%DATASET%.%TABLE%]")
.put("DATASET", sourceTable.getTableReference().getDatasetId())
.put("TABLE", sourceTable.getTableReference().getTableId())
.build(),
destinationUri,
destinationFormat,
printHeader);
}
}
/**
* Starts an asynchronous job to run the provided query, store the results in a temporary table,
* and then extract the contents of that table to the given GCS filepath in the specified
* destination format, optionally printing headers.
* <p>
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extractQuery(
String querySql,
final String destinationUri,
final DestinationFormat destinationFormat,
final boolean printHeader) {
// Note: although BigQuery queries save their results to an auto-generated anonymous table,
// we can't rely on that for running the extract job because it may not be fully replicated.
// Tracking bug for query-to-GCS support is b/13777340.
DestinationTable tempTable = buildTemporaryTable().build();
return Futures.transformAsync(
query(querySql, tempTable), new AsyncFunction<DestinationTable, String>() {
@Override
public ListenableFuture<String> apply(DestinationTable tempTable) {
return extractTable(tempTable, destinationUri, destinationFormat, printHeader);
}
});
}
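  // Sketch of dumping query results straight to GCS (the query and URI are hypothetical):
  //
  //   ListenableFuture<String> destination = connection.extractQuery(
  //       "SELECT * FROM [testing.mytable]",
  //       "gs://my-bucket/export.csv",
  //       DestinationFormat.CSV,
  //       true /* printHeader */);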
/** @see #runJob(Job, AbstractInputStreamContent) */
public Job runJob(Job job) {
return runJob(job, null);
}
/**
   * Launches a job, waits for it to complete, and checks the completed job for errors.
*
* @throws BigqueryJobFailureException
*/
public Job runJob(Job job, @Nullable AbstractInputStreamContent data) {
return checkJob(waitForJob(launchJob(job, data)));
}
/**
   * Launches a job, but does not wait for it to complete.
*
* @throws BigqueryJobFailureException
*/
private Job launchJob(Job job, @Nullable AbstractInputStreamContent data) {
verify(job.getStatus() == null);
try {
return data != null
? bigquery.jobs().insert(getProjectId(), job, data).execute()
: bigquery.jobs().insert(getProjectId(), job).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Synchronously waits for a job to complete that's already been launched.
*
* @throws BigqueryJobFailureException
*/
private Job waitForJob(Job job) {
verify(job.getStatus() != null);
while (!job.getStatus().getState().equals("DONE")) {
sleeper.sleepUninterruptibly(pollInterval);
JobReference ref = job.getJobReference();
try {
job = bigquery.jobs().get(ref.getProjectId(), ref.getJobId()).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
return job;
}
/**
* Checks completed job for errors.
*
* @throws BigqueryJobFailureException
*/
private static Job checkJob(Job job) {
verify(job.getStatus() != null);
JobStatus jobStatus = job.getStatus();
if (jobStatus.getErrorResult() != null) {
throw BigqueryJobFailureException.create(jobStatus);
} else {
logger.info(summarizeCompletedJob(job));
if (jobStatus.getErrors() != null) {
for (ErrorProto error : jobStatus.getErrors()) {
logger.warning(String.format("%s: %s", error.getReason(), error.getMessage()));
}
}
return job;
}
}
/** Returns a summarization of a completed job's statistics for logging. */
private static String summarizeCompletedJob(Job job) {
JobStatistics stats = job.getStatistics();
return String.format(
"Job took %,.3f seconds after a %,.3f second delay and processed %,d bytes (%s)",
(stats.getEndTime() - stats.getStartTime()) / 1000.0,
(stats.getStartTime() - stats.getCreationTime()) / 1000.0,
stats.getTotalBytesProcessed(),
toJobReferenceString(job.getJobReference()));
}
private <T> ListenableFuture<T> runJobToCompletion(Job job, T result) {
return runJobToCompletion(job, result, null);
}
/** Runs job and returns a future that yields {@code result} when {@code job} is completed. */
private <T> ListenableFuture<T> runJobToCompletion(
final Job job,
final T result,
@Nullable final AbstractInputStreamContent data) {
return service.submit(new Callable<T>() {
@Override
public T call() {
runJob(job, data);
return result;
}});
}
private ListenableFuture<Job> runJobToCompletion(final Job job) {
return service.submit(new Callable<Job>() {
@Override
public Job call() {
return runJob(job, null);
}});
}
/** Helper that returns true if a dataset with this name exists. */
public boolean checkDatasetExists(String datasetName) throws IOException {
try {
bigquery.datasets().get(getProjectId(), datasetName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Helper that returns true if a table with this name and dataset name exists. */
public boolean checkTableExists(String datasetName, String tableName) throws IOException {
try {
bigquery.tables().get(getProjectId(), datasetName, tableName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Returns the projectId set by the environment, or {@code null} if none is set. */
public static String getEnvironmentProjectId() {
return RegistryEnvironment.get().config().getProjectId();
}
/** Returns the projectId associated with this bigquery connection. */
public String getProjectId() {
return getEnvironmentProjectId();
}
/** Returns the dataset name that this bigquery connection uses by default. */
public String getDatasetId() {
return datasetId;
}
/** Returns dataset reference that can be used to avoid having to specify dataset in SQL code. */
public DatasetReference getDataset() {
return new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId());
}
/** Returns table reference with the projectId and datasetId filled out for you. */
public TableReference getTable(String tableName) {
return new TableReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId())
.setTableId(tableName);
}
/**
* Helper that creates a dataset with this name if it doesn't already exist, and returns true
* if creation took place.
*/
public boolean createDatasetIfNeeded(String datasetName) throws IOException {
if (!checkDatasetExists(datasetName)) {
bigquery.datasets()
.insert(getProjectId(), new Dataset().setDatasetReference(new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(datasetName)))
.execute();
System.err.printf("Created dataset: %s:%s\n", getProjectId(), datasetName);
return true;
}
return false;
}
/** Create a table from a SQL query if it doesn't already exist. */
public TableReference ensureTable(TableReference table, String sqlQuery) {
try {
runJob(new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(sqlQuery)
.setDefaultDataset(getDataset())
.setDestinationTable(table))));
} catch (BigqueryJobFailureException e) {
if (e.getReason().equals("duplicate")) {
// Table already exists.
} else {
throw e;
}
}
return table;
}
}

View file

@ -0,0 +1,33 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.bigquery.Bigquery;
/** Factory for returning {@link Bigquery} instances. */
public class BigqueryFactory {
public Bigquery create(
String applicationName,
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer) {
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
.setApplicationName(applicationName)
.build();
}
}

View file

@ -0,0 +1,75 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.util.FormattingLogger;
import java.io.IOException;
/** Helpers for Bigquery. */
public class BigqueryHelper {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
/**
* Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper
* to check for dataset existence than it is to try to create it and check for exceptions.
*/
// Note that these are not static so they can be mocked for testing.
public void ensureDataset(Bigquery bigquery, String projectId, String datasetId)
throws IOException {
try {
bigquery.datasets()
.insert(projectId,
new Dataset().setDatasetReference(
new DatasetReference()
.setProjectId(projectId)
.setDatasetId(datasetId)))
.execute();
} catch (IOException e) {
// Swallow errors about a duplicate dataset, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
/** Ensures the table exists in Bigquery. */
public void ensureTable(Bigquery bigquery, TableReference table,
ImmutableList<TableFieldSchema> schema) throws IOException {
try {
bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table()
.setSchema(new TableSchema().setFields(schema))
.setTableReference(table))
.execute();
logger.infofmt("Created BigQuery table %s:%s.%s", table.getProjectId(), table.getDatasetId(),
table.getTableId());
} catch (IOException e) {
// Swallow errors about a table that exists, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
}

View file

@ -0,0 +1,120 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.api.client.googleapis.json.GoogleJsonError;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.common.collect.Iterables;
import java.io.IOException;
import javax.annotation.Nullable;
/** Generic exception to throw if a Bigquery job fails. */
public final class BigqueryJobFailureException extends RuntimeException {
  /** Delegates {@link IOException} errors, checking for {@link GoogleJsonResponseException}. */
public static BigqueryJobFailureException create(IOException cause) {
if (cause instanceof GoogleJsonResponseException) {
return create(((GoogleJsonResponseException) cause).getDetails());
} else {
return new BigqueryJobFailureException(cause.getMessage(), cause, null, null);
}
}
/** Create an error for JSON server response errors. */
public static BigqueryJobFailureException create(GoogleJsonError error) {
return new BigqueryJobFailureException(error.getMessage(), null, null, error);
}
/** Create an error from a failed job. */
public static BigqueryJobFailureException create(JobStatus jobStatus) {
checkArgument(jobStatus.getErrorResult() != null, "this job didn't fail!");
return new BigqueryJobFailureException(
describeError(jobStatus.getErrorResult()), null, jobStatus, null);
}
@Nullable
private final JobStatus jobStatus;
@Nullable
private final GoogleJsonError jsonError;
private BigqueryJobFailureException(
String message,
@Nullable Throwable cause,
@Nullable JobStatus jobStatus,
@Nullable GoogleJsonError jsonError) {
super(message, cause);
this.jobStatus = jobStatus;
this.jsonError = jsonError;
}
/**
* Returns a short error code describing why this job failed.
*
* <h3>Sample Reasons</h3>
*
* <ul>
* <li>{@code "duplicate"}: The table you're trying to create already exists.
* <li>{@code "invalidQuery"}: Query syntax error of some sort.
* <li>{@code "unknown"}: Non-Bigquery errors.
* </ul>
*
* @see "https://cloud.google.com/bigquery/troubleshooting-errors"
*/
public String getReason() {
if (jobStatus != null) {
return jobStatus.getErrorResult().getReason();
} else if (jsonError != null) {
return Iterables.getLast(jsonError.getErrors()).getReason();
} else {
return "unknown";
}
}
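  // Sketch of a caller branching on the reason code, mirroring the "duplicate" handling
  // used elsewhere in this package (the job variable is hypothetical):
  //
  //   try {
  //     connection.runJob(job);
  //   } catch (BigqueryJobFailureException e) {
  //     if (!e.getReason().equals("duplicate")) {
  //       throw e;  // Swallow only "table already exists" failures; rethrow the rest.
  //     }
  //   }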
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(String.format("%s: %s", getClass().getSimpleName(), getMessage()));
try {
if (jobStatus != null) {
for (ErrorProto error : jobStatus.getErrors()) {
result.append("\n---------------------------------- BEGIN DEBUG INFO\n");
result.append(describeError(error));
result.append('\n');
result.append(error.getDebugInfo());
result.append("\n---------------------------------- END DEBUG INFO");
}
}
if (jsonError != null) {
String extraInfo = jsonError.toPrettyString();
result.append('\n');
result.append(extraInfo);
}
} catch (IOException e) {
result.append(e);
}
return result.toString();
}
private static String describeError(ErrorProto error) {
return String.format("%s: %s", error.getReason(), error.getMessage());
}
}

View file

@ -0,0 +1,61 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static dagger.Provides.Type.SET_VALUES;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.BigqueryScopes;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.request.OAuthScopes;
import dagger.Module;
import dagger.Provides;
import java.util.Set;
/**
* Dagger module for Google {@link Bigquery} connection objects.
*
* @see com.google.domain.registry.config.ConfigModule
* @see com.google.domain.registry.request.Modules.UrlFetchTransportModule
* @see com.google.domain.registry.request.Modules.Jackson2Module
* @see com.google.domain.registry.request.Modules.AppIdentityCredentialModule
* @see com.google.domain.registry.request.Modules.UseAppIdentityCredentialForGoogleApisModule
*/
@Module
public final class BigqueryModule {
/** Provides OAuth2 scopes for the Bigquery service needed by Domain Registry. */
@Provides(type = SET_VALUES)
@OAuthScopes
static Set<String> provideBigqueryOAuthScopes() {
return BigqueryScopes.all();
}
@Provides
static Bigquery provideBigquery(
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer,
@Config("projectId") String projectId) {
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
.setApplicationName(projectId)
.build();
}
}

View file

@ -0,0 +1,169 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import com.google.api.services.bigquery.model.JobReference;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.DateTimeFormatterBuilder;
import org.joda.time.format.DateTimeParser;
import org.joda.time.format.ISODateTimeFormat;
import java.util.concurrent.TimeUnit;
/** Utilities related to Bigquery. */
public class BigqueryUtils {
/** Bigquery modes for schema fields. */
public enum FieldMode {
NULLABLE,
REQUIRED,
REPEATED;
/** Return the name of the field mode as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Bigquery schema field types. */
public enum FieldType {
STRING,
INTEGER,
FLOAT,
TIMESTAMP,
RECORD,
BOOLEAN;
/** Return the name of the field type as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Source formats for Bigquery load jobs. */
public enum SourceFormat {
CSV,
NEWLINE_DELIMITED_JSON,
DATASTORE_BACKUP
}
/** Destination formats for Bigquery extract jobs. */
public enum DestinationFormat {
CSV,
NEWLINE_DELIMITED_JSON
}
/** Bigquery table types (i.e. regular table or view). */
public enum TableType {
TABLE,
VIEW
}
/**
* Bigquery write dispositions (i.e. what to do about writing to an existing table).
*
* @see <a href="https://developers.google.com/bigquery/docs/reference/v2/jobs">API docs</a>
*/
public enum WriteDisposition {
/** Only write to the table if there is no existing table or if it is empty. */
WRITE_EMPTY,
/** If the table already exists, overwrite it with the new data. */
WRITE_TRUNCATE,
/** If the table already exists, append the data to the table. */
WRITE_APPEND
}
/**
* A {@code DateTimeFormatter} that defines how to print DateTimes in a string format that
* BigQuery can interpret and how to parse the string formats that BigQuery emits into DateTimes.
* <p>
* The general format definition is "YYYY-MM-DD HH:MM:SS.SSS[ ZZ]", where the fractional seconds
* portion can have 0-6 decimal places (although we restrict it to 0-3 here since Joda DateTime
* only supports up to millisecond precision) and the zone if not specified defaults to UTC.
* <p>
   * Although we expect a zone specification of "UTC" when parsing, we don't emit it when
   * printing, because in some cases BigQuery does not allow any time zone specification on
   * input timestamp strings and instead assumes UTC for whatever input you provide
   * (see b/16380363).
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static final DateTimeFormatter BIGQUERY_TIMESTAMP_FORMAT = new DateTimeFormatterBuilder()
.append(ISODateTimeFormat.date())
.appendLiteral(' ')
.append(
// For printing, always print out the milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getPrinter(),
// For parsing, we need a series of parsers to correctly handle the milliseconds.
new DateTimeParser[] {
// Try to parse the time with milliseconds first, which requires at least one
// fractional second digit, and if that fails try to parse without milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getParser(),
ISODateTimeFormat.hourMinuteSecond().getParser()})
// Print UTC as the empty string since BigQuery's TIMESTAMP() function does not accept any
// time zone specification, but require "UTC" on parsing. Since we force this formatter to
// always use UTC below, the other arguments do not matter.
//
// TODO(b/26162667): replace this with appendLiteral(" UTC") if b/16380363 gets resolved.
.appendTimeZoneOffset("", " UTC", false, 1, 1)
.toFormatter()
.withZoneUTC();
/**
* Returns the human-readable string version of the given DateTime, suitable for conversion
* within BigQuery from a string literal into a BigQuery timestamp type.
*/
public static String toBigqueryTimestampString(DateTime dateTime) {
return BIGQUERY_TIMESTAMP_FORMAT.print(dateTime);
}
/** Returns the DateTime for a given human-readable string-formatted BigQuery timestamp. */
public static DateTime fromBigqueryTimestampString(String timestampString) {
return BIGQUERY_TIMESTAMP_FORMAT.parseDateTime(timestampString);
}
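  // Illustrative round trip: toBigqueryTimestampString(DateTime.parse("2014-07-17T20:35:42Z"))
  // yields "2014-07-17 20:35:42.000" (no zone suffix on output), while parsing expects the
  // explicit suffix, so fromBigqueryTimestampString("2014-07-17 20:35:42.000 UTC") returns
  // the same instant.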
/**
* Converts a time (in TimeUnits since the epoch) into a numeric string that BigQuery understands
* as a timestamp: the decimal number of seconds since the epoch, precise up to microseconds.
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static String toBigqueryTimestamp(long timestamp, TimeUnit unit) {
long seconds = unit.toSeconds(timestamp);
long fractionalSeconds = unit.toMicros(timestamp) % 1000000;
return String.format("%d.%06d", seconds, fractionalSeconds);
}
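  // Worked example: toBigqueryTimestamp(1234567890123L, TimeUnit.MILLISECONDS) computes
  // 1234567890 whole seconds plus 123000 leftover microseconds, so it returns
  // "1234567890.123000".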
/**
* Converts a {@link DateTime} into a numeric string that BigQuery understands as a timestamp:
* the decimal number of seconds since the epoch, precise up to microseconds.
*
* <p>Note that since {@code DateTime} only stores milliseconds, the last 3 digits will be zero.
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static String toBigqueryTimestamp(DateTime dateTime) {
return toBigqueryTimestamp(dateTime.getMillis(), TimeUnit.MILLISECONDS);
}
/**
* Returns the canonical string format for a JobReference object (the project ID and then job ID,
* delimited by a single colon) since JobReference.toString() is not customized to return it.
*/
public static String toJobReferenceString(JobReference jobRef) {
return jobRef.getProjectId() + ":" + jobRef.getJobId();
}
}

View file

@ -0,0 +1,16 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.bigquery;

View file

@ -0,0 +1,15 @@
package(default_visibility = ["//java/com/google/domain/registry:registry_project"])
java_library(
name = "braintree",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/keyring/api",
"//third_party/java/braintree",
"//third_party/java/dagger",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

View file

@ -0,0 +1,47 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.braintree;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.keyring.api.KeyModule.Key;
import com.braintreegateway.BraintreeGateway;
import dagger.Module;
import dagger.Provides;
import javax.inject.Singleton;
/** Dagger module for Braintree Payments API. */
@Module
public final class BraintreeModule {
@Provides
@Singleton
static BraintreeGateway provideBraintreeGateway(
RegistryEnvironment environment,
@Config("braintreeMerchantId") String merchantId,
@Config("braintreePublicKey") String publicKey,
@Key("braintreePrivateKey") String privateKey) {
return new BraintreeGateway(
environment == RegistryEnvironment.PRODUCTION
? com.braintreegateway.Environment.PRODUCTION
: com.braintreegateway.Environment.SANDBOX,
merchantId,
publicKey,
privateKey);
}
}

View file

@ -0,0 +1,17 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/** Braintree payment gateway utilities. */
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.braintree;

View file

@ -0,0 +1,18 @@
package(default_visibility = ["//java/com/google/domain/registry:registry_project"])
java_library(
name = "config",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_money",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
],
)

View file

@ -0,0 +1,558 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import static com.google.domain.registry.config.ConfigUtils.makeUrl;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import dagger.Module;
import dagger.Provides;
import org.joda.money.CurrencyUnit;
import org.joda.time.DateTimeConstants;
import org.joda.time.Duration;
import java.lang.annotation.Documented;
import java.net.URI;
import java.net.URL;
import javax.inject.Qualifier;
/** Dagger module for injecting configuration settings. */
@Module
public final class ConfigModule {
/** Dagger qualifier for configuration settings. */
@Qualifier
@Documented
public static @interface Config {
String value() default "";
}
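  // Illustrative sketch of how a @Config value is consumed downstream via Dagger injection
  // (the class here is hypothetical; "zoneFilesBucket" is one of the keys provided below):
  //
  //   final class ZoneFileExporter {
  //     private final String bucket;
  //     @Inject ZoneFileExporter(@Config("zoneFilesBucket") String bucket) {
  //       this.bucket = bucket;
  //     }
  //   }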
private static final RegistryEnvironment registryEnvironment = RegistryEnvironment.get();
@Provides
public static RegistryEnvironment provideRegistryEnvironment() {
return registryEnvironment;
}
@Provides
public static RegistryConfig provideConfig(RegistryEnvironment environment) {
return environment.config();
}
@Provides
@Config("projectId")
public static String provideProjectId(RegistryConfig config) {
return config.getProjectId();
}
/** @see RegistryConfig#getZoneFilesBucket() */
@Provides
@Config("zoneFilesBucket")
public static String provideZoneFilesBucket(RegistryConfig config) {
return config.getZoneFilesBucket();
}
/** @see RegistryConfig#getCommitsBucket() */
@Provides
@Config("commitLogGcsBucket")
public static String provideCommitLogGcsBucket(RegistryConfig config) {
return config.getCommitsBucket();
}
/** @see RegistryConfig#getCommitLogDatastoreRetention() */
@Provides
@Config("commitLogDatastoreRetention")
public static Duration provideCommitLogDatastoreRetention(RegistryConfig config) {
return config.getCommitLogDatastoreRetention();
}
/**
* Maximum number of commit logs to delete per transaction.
*
* <p>If we assume that the average key size is 256 bytes and that each manifest has six
   * mutations, we can do about 5,000 deletes in a single transaction before hitting the 10 MB
   * limit. Therefore 500 should be a safe number, since it's an order of magnitude less space
   * than we need.
*
   * <p>Transactions also have a four-minute time limit. Since we have to perform N subqueries to
   * fetch mutation keys, 500 would be a safe number if those queries were performed in serial,
   * since each query would have about 500ms to complete, which is an order of magnitude more time
   * than we need. However, this does not apply, since the subqueries are performed asynchronously.
*
* @see com.google.domain.registry.backup.DeleteOldCommitLogsAction
*/
@Provides
@Config("commitLogMaxDeletes")
public static int provideCommitLogMaxDeletes() {
return 500;
}
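  // Rough arithmetic behind the numbers above (one plausible reading of the estimate):
  // each delete touches one manifest key plus six mutation keys, so
  //   5,000 deletes * 7 keys * 256 bytes/key ~= 9 MB,
  // just under the 10 MB transaction limit; 500 keeps an order-of-magnitude margin.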
/**
* Batch size for the number of transactions' worth of commit log data to process at once when
* exporting a commit log diff.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
@Provides
@Config("commitLogDiffExportBatchSize")
public static int provideCommitLogDiffExportBatchSize() {
return 100;
}
/**
* Returns the Google Cloud Storage bucket for staging BRDA escrow deposits.
*
* @see com.google.domain.registry.rde.PendingDepositChecker
*/
@Provides
@Config("brdaBucket")
public static String provideBrdaBucket(@Config("projectId") String projectId) {
return projectId + "-icann-brda";
}
/** @see com.google.domain.registry.rde.BrdaCopyTask */
@Provides
@Config("brdaDayOfWeek")
public static int provideBrdaDayOfWeek() {
return DateTimeConstants.TUESDAY;
}
/** Amount of time between BRDA deposits. */
@Provides
@Config("brdaInterval")
public static Duration provideBrdaInterval() {
return Duration.standardDays(7);
}
  /** Maximum amount of time generating a BRDA deposit for a TLD could take, before killing. */
@Provides
@Config("brdaLockTimeout")
public static Duration provideBrdaLockTimeout() {
return Duration.standardHours(5);
}
/** Returns {@code true} if the target zone should be created in DNS if it does not exist. */
@Provides
@Config("dnsCreateZone")
public static boolean provideDnsCreateZone(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return false;
default:
return true;
}
}
/**
* The maximum number of domain and host updates to batch together to send to
* PublishDnsUpdatesAction, to avoid exceeding AppEngine's limits.
   */
@Provides
@Config("dnsTldUpdateBatchSize")
public static int provideDnsTldUpdateBatchSize() {
return 100;
}
/** The maximum interval (seconds) to lease tasks from the dns-pull queue. */
@Provides
@Config("dnsWriteLockTimeout")
public static Duration provideDnsWriteLockTimeout() {
// Optimally, we would set this to a little less than the length of the DNS refresh cycle, since
// otherwise, a new PublishDnsUpdatesAction could get kicked off before the current one has
// finished, which will try and fail to acquire the lock. However, it is more important that it
// be greater than the DNS write timeout, so that if that timeout occurs, it will be cleaned up
// gracefully, rather than having the lock time out. So we have to live with the possible lock
// failures.
return Duration.standardSeconds(75);
}
/** Returns the default time to live for DNS records. */
@Provides
@Config("dnsDefaultTtl")
public static Duration provideDnsDefaultTtl() {
return Duration.standardSeconds(180);
}
/**
* Number of sharded entity group roots used for performing strongly consistent scans.
*
* <p><b>Warning:</b> This number may increase but never decrease.
*
* @see com.google.domain.registry.model.index.EppResourceIndex
*/
@Provides
@Config("eppResourceIndexBucketCount")
public static int provideEppResourceIndexBucketCount(RegistryConfig config) {
return config.getEppResourceIndexBucketCount();
}
/**
* Returns size of Google Cloud Storage client connection buffer in bytes.
*
* @see com.google.domain.registry.gcs.GcsUtils
*/
@Provides
@Config("gcsBufferSize")
public static int provideGcsBufferSize() {
return 1024 * 1024;
}
/**
* Gets the email address of the admin account for the Google App.
*
* @see com.google.domain.registry.groups.DirectoryGroupsConnection
*/
@Provides
@Config("googleAppsAdminEmailAddress")
public static String provideGoogleAppsAdminEmailAddress(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "admin@googleregistry.co";
default:
return "admin@domainregistry-sandbox.co";
}
}
/**
* Returns the publicly accessible domain name for the running Google Apps instance.
*
* @see com.google.domain.registry.export.SyncGroupMembersTask
* @see com.google.domain.registry.tools.server.CreateGroupsTask
*/
@Provides
@Config("publicDomainName")
public static String providePublicDomainName(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "googleregistry.co";
default:
return "domainregistry-sandbox.co";
}
}
@Provides
@Config("tmchCaTestingMode")
public static boolean provideTmchCaTestingMode(RegistryConfig config) {
return config.getTmchCaTestingMode();
}
/**
* ICANN TMCH Certificate Revocation List URL.
*
* <p>This file needs to be downloaded at least once a day and verified to make sure it was
* signed by {@code icann-tmch.crt}.
*
* @see com.google.domain.registry.tmch.TmchCrlTask
* @see "http://tools.ietf.org/html/draft-lozano-tmch-func-spec-08#section-5.2.3.2"
*/
@Provides
@Config("tmchCrlUrl")
public static URL provideTmchCrlUrl(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return makeUrl("http://crl.icann.org/tmch.crl");
default:
return makeUrl("http://crl.icann.org/tmch_pilot.crl");
}
}
@Provides
@Config("tmchMarksdbUrl")
public static String provideTmchMarksdbUrl(RegistryConfig config) {
return config.getTmchMarksdbUrl();
}
/**
* Returns the Google Cloud Storage bucket for staging escrow deposits pending upload.
*
* @see com.google.domain.registry.rde.RdeStagingAction
*/
@Provides
@Config("rdeBucket")
public static String provideRdeBucket(@Config("projectId") String projectId) {
return projectId + "-rde";
}
/**
* Size of Ghostryde buffer in bytes for each layer in the pipeline.
*
* @see com.google.domain.registry.rde.Ghostryde
*/
@Provides
@Config("rdeGhostrydeBufferSize")
public static Integer provideRdeGhostrydeBufferSize() {
return 64 * 1024;
}
/** Amount of time between RDE deposits. */
@Provides
@Config("rdeInterval")
public static Duration provideRdeInterval() {
return Duration.standardDays(1);
}
/** Maximum amount of time for sending a small XML file to ICANN via HTTP, before killing. */
@Provides
@Config("rdeReportLockTimeout")
public static Duration provideRdeReportLockTimeout() {
return Duration.standardSeconds(60);
}
/**
* URL of ICANN's HTTPS server to which the RDE report should be {@code PUT}.
*
* <p>You must append {@code "/TLD/ID"} to this URL.
*
* @see com.google.domain.registry.rde.RdeReportTask
*/
@Provides
@Config("rdeReportUrlPrefix")
public static String provideRdeReportUrlPrefix(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "https://ry-api.icann.org/report/registry-escrow-report";
default:
return "https://test-ry-api.icann.org:8543/report/registry-escrow-report";
}
}
/**
* Size of RYDE generator buffer in bytes for each of the five layers.
*
* @see com.google.domain.registry.rde.RydePgpCompressionOutputStream
* @see com.google.domain.registry.rde.RydePgpFileOutputStream
* @see com.google.domain.registry.rde.RydePgpSigningOutputStream
* @see com.google.domain.registry.rde.RydeTarOutputStream
*/
@Provides
@Config("rdeRydeBufferSize")
public static Integer provideRdeRydeBufferSize() {
return 64 * 1024;
}
/** Maximum amount of time generating an escrow deposit for a TLD could take, before killing. */
@Provides
@Config("rdeStagingLockTimeout")
public static Duration provideRdeStagingLockTimeout() {
return Duration.standardHours(5);
}
/** Maximum amount of time it should ever take to upload an escrow deposit, before killing. */
@Provides
@Config("rdeUploadLockTimeout")
public static Duration provideRdeUploadLockTimeout() {
return Duration.standardMinutes(30);
}
/**
* Minimum amount of time to wait between consecutive SFTP uploads on a single TLD.
*
* <p>This value was communicated to us by the escrow provider.
*/
@Provides
@Config("rdeUploadSftpCooldown")
public static Duration provideRdeUploadSftpCooldown() {
return Duration.standardHours(2);
}
/**
* Returns SFTP URL containing a username, hostname, port (optional), and directory (optional) to
* which cloud storage files are uploaded. The password should not be included, as it's better to
* use public key authentication.
*
* @see com.google.domain.registry.rde.RdeUploadTask
*/
@Provides
@Config("rdeUploadUrl")
public static URI provideRdeUploadUrl(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return URI.create("sftp://GoogleTLD@sftpipm2.ironmountain.com/Outbox");
default:
return URI.create("sftp://google@ppftpipm.ironmountain.com/Outbox");
}
}
/** Maximum amount of time for syncing a spreadsheet, before killing. */
@Provides
@Config("sheetLockTimeout")
public static Duration provideSheetLockTimeout() {
return Duration.standardHours(1);
}
/**
* Returns ID of Google Spreadsheet to which Registrar entities should be synced.
*
* <p>This ID, as you'd expect, comes from the URL of the spreadsheet.
*
* @see com.google.domain.registry.export.sheet.SyncRegistrarsSheetTask
*/
@Provides
@Config("sheetRegistrarId")
public static Optional<String> provideSheetRegistrarId(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return Optional.of("1n2Gflqsgo9iDXcdt9VEskOVySZ8qIhQHJgjqsleCKdE");
case ALPHA:
case CRASH:
return Optional.of("16BwRt6v11Iw-HujCbAkmMxqw3sUG13B8lmXLo-uJTsE");
case SANDBOX:
return Optional.of("1TlR_UMCtfpkxT9oUEoF5JEbIvdWNkLRuURltFkJ_7_8");
case QA:
return Optional.of("1RoY1XZhLLwqBkrz0WbEtaT9CU6c8nUAXfId5BtM837o");
default:
return Optional.absent();
}
}
/** Amount of time between synchronizations of the Registrar spreadsheet. */
@Provides
@Config("sheetRegistrarInterval")
public static Duration provideSheetRegistrarInterval() {
return Duration.standardHours(1);
}
/**
* Returns SSH client connection and read timeout.
*
* @see com.google.domain.registry.rde.RdeUploadTask
*/
@Provides
@Config("sshTimeout")
public static Duration provideSshTimeout() {
return Duration.standardSeconds(30);
}
/** Duration after watermark where we shouldn't deposit, because transactions might be pending. */
@Provides
@Config("transactionCooldown")
public static Duration provideTransactionCooldown() {
return Duration.standardMinutes(5);
}
/**
* Number of times to retry a GAE operation when {@code TransientFailureException} is thrown.
*
* <p>The number of milliseconds it'll sleep before giving up is {@code 2^n - 2}.
*
* @see com.google.domain.registry.util.TaskEnqueuer
*/
@Provides
@Config("transientFailureRetries")
public static int provideTransientFailureRetries() {
return 12; // Four seconds.
}
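// Illustrative sketch (this helper is not in the original module): the arithmetic behind the
// "2^n - 2 milliseconds" figure documented above, assuming the task enqueuer sleeps 2^k ms
// before the k-th retry. With n = 12 the total is 2^12 - 2 = 4094 ms, i.e. about four seconds.
static long totalTransientFailureBackoffMillis(int retries) {
long totalMillis = 0;
for (int k = 1; k < retries; k++) {
totalMillis += 1L << k; // 2^k milliseconds slept before retry k
}
return totalMillis; // equals 2^retries - 2
}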
/**
* Amount of time public HTTP proxies are permitted to cache our WHOIS responses.
*
* @see com.google.domain.registry.whois.WhoisHttpServer
*/
@Provides
@Config("whoisHttpExpires")
public static Duration provideWhoisHttpExpires() {
return Duration.standardDays(1);
}
/**
* Maximum number of results to return for an RDAP search query.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapResultSetMaxSize")
public static int provideRdapResultSetMaxSize() {
return 100;
}
/**
* Base for RDAP link paths.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapLinkBase")
public static String provideRdapLinkBase() {
return "https://nic.google/rdap/";
}
/**
* WHOIS server displayed in RDAP query responses.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapWhoisServer")
public static String provideRdapWhoisServer() {
return "whois.nic.google";
}
/** Returns Braintree Merchant Account IDs for each supported currency. */
@Provides
@Config("braintreeMerchantAccountIds")
public static ImmutableMap<CurrencyUnit, String> provideBraintreeMerchantAccountId(
RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return ImmutableMap.of(
CurrencyUnit.USD, "charlestonregistryUSD",
CurrencyUnit.JPY, "charlestonregistryJPY");
default:
return ImmutableMap.of(
CurrencyUnit.USD, "google",
CurrencyUnit.JPY, "google-jpy");
}
}
/**
* Returns the Braintree Merchant ID of the Registry, used for accessing the Braintree API.
*
* <p>This is a base32 value copied from the Braintree website.
*/
@Provides
@Config("braintreeMerchantId")
public static String provideBraintreeMerchantId(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "TODO(b/25619518): Add production Braintree API credentials";
default:
// Valentine: Domain Registry Braintree Sandbox
return "vqgn8khkq2cs6y9s";
}
}
/**
* Returns the Braintree Public Key of the Registry, used for accessing the Braintree API.
*
* <p>This is a base32 value copied from the Braintree website.
*
* @see com.google.domain.registry.keyring.api.Keyring#getBraintreePrivateKey()
*/
@Provides
@Config("braintreePublicKey")
public static String provideBraintreePublicKey(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "tzcfxggzgbh2jg5x";
default:
// Valentine: Domain Registry Braintree Sandbox
return "tzcyzvm3mn7zkdnx";
}
}
}

View file

@ -0,0 +1,37 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import java.net.MalformedURLException;
import java.net.URL;
/** Helper methods for configuration classes. */
final class ConfigUtils {
/**
* Creates a URL instance.
*
* @throws RuntimeException to rethrow {@link MalformedURLException}
*/
static URL makeUrl(String url) {
try {
return new URL(url);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private ConfigUtils() {}
}

View file

@ -0,0 +1,252 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.HostAndPort;
import org.joda.time.Duration;
import java.net.URL;
/**
* Domain Registry configuration.
*
* <p>The goal of this custom configuration system is to have our project environments configured
* in type-safe Java code that can be refactored, rather than XML files and system properties.
*
* <p><b>Note:</b> This interface is deprecated by {@link ConfigModule}.
*/
public interface RegistryConfig {
/**
* Returns the App Engine project ID, which is based on the environment name.
*/
public String getProjectId();
/**
* Returns the Google Cloud Storage bucket for storing backup snapshots.
*
* @see com.google.domain.registry.export.ExportSnapshotServlet
*/
public String getSnapshotsBucket();
/**
* Returns the BigQuery dataset for storing directly imported datastore snapshots.
*
* @see com.google.domain.registry.export.LoadSnapshotServlet
*/
public String getSnapshotsDataset();
/**
* Returns the BigQuery dataset for storing views pointing to the latest datastore snapshot.
*
* @see com.google.domain.registry.export.UpdateSnapshotViewServlet
*/
public String getLatestSnapshotDataset();
/**
* Number of sharded commit log buckets.
*
* <p>This number is crucial for determining how much transactional throughput the system can
* allow, because it determines how many entity groups are available for writing commit logs.
* Since entity groups have a one transaction per second SLA (which is actually like ten in
* practice), a registry that wants to be able to handle one hundred transactions per second
* should have one hundred buckets.
*
* <p><b>Warning:</b> This can be raised but never lowered.
*
* @see com.google.domain.registry.model.ofy.CommitLogBucket
*/
public int getCommitLogBucketCount();
/**
* Returns the length of time before commit logs should be deleted from datastore.
*
* <p>The only reason you'll want to retain these commit logs in datastore is for performing
* point-in-time restoration queries for subsystems like RDE.
*
* @see com.google.domain.registry.backup.DeleteOldCommitLogsAction
* @see com.google.domain.registry.model.translators.CommitLogRevisionsTranslatorFactory
*/
public Duration getCommitLogDatastoreRetention();
/**
* Returns the Google Cloud Storage bucket for storing commit logs.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
public String getCommitsBucket();
/**
* Returns the Google Cloud Storage bucket for storing zone files.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
public String getZoneFilesBucket();
/**
* Returns {@code true} if TMCH certificate authority should be in testing mode.
*
* @see com.google.domain.registry.tmch.TmchCertificateAuthority
*/
public boolean getTmchCaTestingMode();
/**
* URL prefix for communicating with the MarksDB {@code ry} interface.
*
* <p>This URL is used for DNL, SMDRL, and LORDN.
*
* @see com.google.domain.registry.tmch.Marksdb
* @see com.google.domain.registry.tmch.NordnUploadAction
*/
public String getTmchMarksdbUrl();
public Optional<String> getECatcherAddress();
/**
* Returns the address of the Domain Registry app HTTP server.
*
* <p>This is used by {@code registry_tool} to connect to the App Engine remote API.
*/
public HostAndPort getServer();
/** Returns the amount of time a singleton should be cached, before expiring. */
public Duration getSingletonCacheRefreshDuration();
/**
* Returns the amount of time a domain label list should be cached in memory before expiring.
*
* @see com.google.domain.registry.model.registry.label.ReservedList
* @see com.google.domain.registry.model.registry.label.PremiumList
*/
public Duration getDomainLabelListCacheDuration();
/** Returns the amount of time a singleton should be cached in persist mode, before expiring. */
public Duration getSingletonCachePersistDuration();
/**
* Returns the header text at the top of the reserved terms exported list.
*
* @see com.google.domain.registry.export.ExportUtils#exportReservedTerms
*/
public String getReservedTermsExportDisclaimer();
/**
* Returns a display name that is used on outgoing emails sent by Domain Registry.
*
* @see com.google.domain.registry.util.SendEmailUtils
*/
public String getGoogleAppsAdminEmailDisplayName();
/**
* Returns the email address that outgoing emails from the app are sent from.
*
* @see com.google.domain.registry.util.SendEmailUtils
*/
public String getGoogleAppsSendFromEmailAddress();
/**
* Returns the roid suffix to be used for the roids of all contacts and hosts. E.g. a value of
* "ROID" would end up creating roids that look like "ABC123-ROID".
*
* @see <a href="http://www.iana.org/assignments/epp-repository-ids/epp-repository-ids.xhtml">
* Extensible Provisioning Protocol (EPP) Repository Identifiers</a>
*/
public String getContactAndHostRepositoryIdentifier();
/**
* Returns the email address(es) that notifications of registrar and/or registrar contact updates
* should be sent to, or the empty list if updates should not be sent.
*
* @see com.google.domain.registry.ui.server.registrar.RegistrarServlet
*/
public ImmutableList<String> getRegistrarChangesNotificationEmailAddresses();
/**
* Returns the relative URL path to the admin servlet, e.g. "/_dr/admin".
*/
public String getAdminServletPathPrefix();
/**
* Returns default WHOIS server to use when {@code Registrar#getWhoisServer()} is {@code null}.
*
* @see "com.google.domain.registry.whois.DomainWhoisResponse"
* @see "com.google.domain.registry.whois.RegistrarWhoisResponse"
*/
public String getRegistrarDefaultWhoisServer();
/**
* Returns the default referral URL that is used unless registrars have specified otherwise.
*/
public URL getRegistrarDefaultReferralUrl();
/**
* Returns whether the registrar console is enabled.
*/
public boolean isRegistrarConsoleEnabled();
/**
* Returns the title of the project used in generating documentation.
*/
public String getDocumentationProjectTitle();
/**
* Returns the maximum number of entities that can be checked at one time in an EPP check flow.
*/
public int getMaxChecks();
/**
* Returns the number of EppResourceIndex buckets to be used.
*/
public int getEppResourceIndexBucketCount();
/**
* Returns the base duration that gets doubled on each retry within {@code Ofy}.
*/
public Duration getBaseOfyRetryDuration();
/**
* Returns the global automatic transfer length for contacts. After this amount of time has
* elapsed, the transfer is automatically approved.
*/
public Duration getContactAutomaticTransferLength();
/**
* Returns the clientId of the registrar used by the {@code CheckApiServlet}.
*/
public String getCheckApiServletRegistrarClientId();
/**
* Returns the delay before executing async delete flow mapreduces.
*
* <p>This delay should be sufficiently longer than a transaction, to solve the following problem:
* <ul>
* <li>a domain mutation flow starts a transaction
* <li>the domain flow non-transactionally reads a resource and sees that it's not in
* PENDING_DELETE
* <li>the domain flow creates a new reference to this resource
* <li>a contact/host delete flow runs and marks the resource PENDING_DELETE and commits
* <li>the domain flow commits
* </ul>
*
* <p>Although we try not to add references to a PENDING_DELETE resource, strictly speaking that
* is ok as long as the mapreduce eventually sees the new reference (and therefore asynchronously
* fails the delete). Without this delay, the mapreduce might have started before the domain flow
* committed, and could potentially miss the reference.
*/
public Duration getAsyncDeleteFlowMapreduceDelay();
}

View file

@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import com.google.common.annotations.VisibleForTesting;
import javax.annotation.Nullable;
/** Registry environments. */
public enum RegistryEnvironment {
/** Production environment. */
PRODUCTION,
/** Development environment. */
ALPHA,
/** Load/Backup/Restore Testing environment. */
CRASH,
/** Local machine environment. */
LOCAL,
/** Quality Assurance environment. */
QA,
/** Sandbox environment. */
SANDBOX,
/**
* Unit testing environment.
*
* <p>This is the default enum value. This is because it's non-trivial to configure the system
* property that specifies the environment in our unit tests.
*
* <p>Do not use this environment outside of unit tests.
*/
UNITTEST;
/** Returns environment configured by system property {@value #PROPERTY}. */
public static RegistryEnvironment get() {
return valueOf(System.getProperty(PROPERTY, UNITTEST.name()).toUpperCase());
}
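// Usage note (flag value is hypothetical): the environment is selected with a JVM system
// property, e.g. -Dcom.google.domain.registry.environment=sandbox selects SANDBOX; lookup is
// case-insensitive thanks to the toUpperCase() call above.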
/**
* Returns configuration for this Domain Registry environment.
*
* <p><b>WARNING:</b> Do not store this value in a static field, or you won't be able to
* override it for testing. Store the environment object in a static field instead.
*/
public RegistryConfig config() {
if (configOverride != null) {
return configOverride;
} else if (this == UNITTEST) {
return testingConfig;
} else {
return config;
}
}
/** Globally override registry configuration from within a unit test. */
@VisibleForTesting
public static void overrideConfigurationForTesting(@Nullable RegistryConfig newConfig) {
configOverride = newConfig;
}
@Nullable
private static RegistryConfig configOverride;
// TODO(b/19247780) Use true dependency injection for this. In the meantime, if you're not
// Google, you'll need to change this to include your own config class implementation at compile
// time.
private static final RegistryConfig testingConfig = new TestRegistryConfig();
private final RegistryConfig config = new TestRegistryConfig();
/** System property for configuring which environment we should use. */
public static final String PROPERTY = "com.google.domain.registry.environment";
}

View file

@ -0,0 +1,191 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import static com.google.domain.registry.config.ConfigUtils.makeUrl;
import static org.joda.time.Duration.standardDays;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.HostAndPort;
import org.joda.time.Duration;
import java.net.URL;
/**
* An implementation of RegistryConfig for unit testing that contains suitable testing data.
*/
public class TestRegistryConfig implements RegistryConfig {
public TestRegistryConfig() {}
@Override
public String getProjectId() {
return "domain-registry";
}
@Override
public int getCommitLogBucketCount() {
return 1;
}
@Override
public Duration getCommitLogDatastoreRetention() {
return Duration.standardDays(30);
}
@Override
public String getSnapshotsBucket() {
return getProjectId() + "-snapshots";
}
@Override
public String getSnapshotsDataset() {
return "snapshots";
}
@Override
public String getLatestSnapshotDataset() {
return "latest_snapshot";
}
@Override
public String getCommitsBucket() {
return getProjectId() + "-commits";
}
@Override
public String getZoneFilesBucket() {
return getProjectId() + "-zonefiles";
}
@Override
public boolean getTmchCaTestingMode() {
return true;
}
@Override
public String getTmchMarksdbUrl() {
return "https://ry.marksdb.org";
}
@Override
public Optional<String> getECatcherAddress() {
throw new UnsupportedOperationException();
}
@Override
public HostAndPort getServer() {
throw new UnsupportedOperationException();
}
@Override
public Duration getSingletonCacheRefreshDuration() {
// All cache durations are set to zero so that unit tests can update and then retrieve data
// immediately without failure.
return Duration.ZERO;
}
@Override
public Duration getDomainLabelListCacheDuration() {
return Duration.ZERO;
}
@Override
public Duration getSingletonCachePersistDuration() {
return Duration.ZERO;
}
@Override
public String getReservedTermsExportDisclaimer() {
return "This is a disclaimer.\n";
}
@Override
public String getGoogleAppsAdminEmailDisplayName() {
return "Testing Domain Registry";
}
@Override
public String getGoogleAppsSendFromEmailAddress() {
return "noreply@testing.example";
}
@Override
public ImmutableList<String> getRegistrarChangesNotificationEmailAddresses() {
return ImmutableList.of("notification@test.example", "notification2@test.example");
}
@Override
public String getAdminServletPathPrefix() {
return "/_dr/admin";
}
@Override
public String getRegistrarDefaultWhoisServer() {
return "whois.nic.fakewhois.example";
}
@Override
public URL getRegistrarDefaultReferralUrl() {
return makeUrl("http://www.referral.example/path");
}
@Override
public boolean isRegistrarConsoleEnabled() {
return true;
}
@Override
public String getDocumentationProjectTitle() {
return "Domain Registry";
}
@Override
public int getMaxChecks() {
return 50;
}
@Override
public int getEppResourceIndexBucketCount() {
return 2;
}
@Override
public Duration getBaseOfyRetryDuration() {
return Duration.ZERO;
}
@Override
public String getContactAndHostRepositoryIdentifier() {
return "ROID";
}
@Override
public Duration getContactAutomaticTransferLength() {
return standardDays(5);
}
@Override
public String getCheckApiServletRegistrarClientId() {
return "TheRegistrar";
}
@Override
public Duration getAsyncDeleteFlowMapreduceDelay() {
return Duration.standardSeconds(90);
}
}

View file

@ -0,0 +1,16 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.config;

View file

@ -0,0 +1,25 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "cron",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -0,0 +1,58 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.TaskEnqueuer;
import java.util.Random;
import javax.inject.Inject;
/** Action for fanning out cron tasks for each commit log bucket. */
@Action(path = "/_dr/cron/commitLogFanout", automaticallyPrintOk = true)
public final class CommitLogFanoutAction implements Runnable {
public static final String BUCKET_PARAM = "bucket";
private static final Random random = new Random();
@Inject TaskEnqueuer taskEnqueuer;
@Inject @Parameter("endpoint") String endpoint;
@Inject @Parameter("queue") String queue;
@Inject @Parameter("jitterSeconds") Optional<Integer> jitterSeconds;
@Inject CommitLogFanoutAction() {}
@Override
public void run() {
Queue taskQueue = getQueue(queue);
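// Enqueue one task per commit log bucket. When jitterSeconds is present, each task is
// delayed by a random amount up to that many seconds, so the buckets don't all fire at once.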
for (int bucketId : CommitLogBucket.getBucketIds()) {
TaskOptions taskOptions = TaskOptions.Builder.withUrl(endpoint)
.param(BUCKET_PARAM, Integer.toString(bucketId))
.countdownMillis(jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0);
taskEnqueuer.enqueue(taskQueue, taskOptions);
}
}
}

View file

@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.domain.registry.request.RequestParameters.extractBooleanParameter;
import static com.google.domain.registry.request.RequestParameters.extractOptionalIntParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static com.google.domain.registry.request.RequestParameters.extractSetOfParameters;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the cron package. */
@Module
public final class CronModule {
@Provides
@Parameter("endpoint")
static String provideEndpoint(HttpServletRequest req) {
return extractRequiredParameter(req, "endpoint");
}
@Provides
@Parameter("exclude")
static ImmutableSet<String> provideExcludes(HttpServletRequest req) {
return extractSetOfParameters(req, "exclude");
}
@Provides
@Parameter("queue")
static String provideQueue(HttpServletRequest req) {
return extractRequiredParameter(req, "queue");
}
@Provides
@Parameter("runInEmpty")
static boolean provideRunInEmpty(HttpServletRequest req) {
return extractBooleanParameter(req, "runInEmpty");
}
@Provides
@Parameter("forEachRealTld")
static boolean provideForEachRealTld(HttpServletRequest req) {
return extractBooleanParameter(req, "forEachRealTld");
}
@Provides
@Parameter("forEachTestTld")
static boolean provideForEachTestTld(HttpServletRequest req) {
return extractBooleanParameter(req, "forEachTestTld");
}
@Provides
@Parameter("jitterSeconds")
static Optional<Integer> provideJitterSeconds(HttpServletRequest req) {
return extractOptionalIntParameter(req, "jitterSeconds");
}
}

View file

@ -0,0 +1,134 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;
import static com.google.common.base.Strings.nullToEmpty;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.getFirst;
import static com.google.common.collect.Multimaps.filterKeys;
import static com.google.common.collect.Sets.difference;
import static com.google.domain.registry.model.registry.Registries.getTldsOfType;
import static com.google.domain.registry.model.registry.Registry.TldType.REAL;
import static com.google.domain.registry.model.registry.Registry.TldType.TEST;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.ParameterMap;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.TaskEnqueuer;
import java.util.Random;
import java.util.Set;
import javax.inject.Inject;
/**
* Action for fanning out cron tasks sharded by TLD.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code endpoint} (Required) URL path of servlet to launch. This may contain pathargs.
* <li>{@code queue} (Required) Name of the App Engine push queue to which this task should be sent.
* <li>{@code forEachRealTld} Launch the task in each real TLD namespace.
* <li>{@code forEachTestTld} Launch the task in each test TLD namespace.
* <li>{@code runInEmpty} Launch the task in the empty namespace.
* <li>{@code exclude} TLDs to exclude.
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* <li>Any other parameters specified will be passed through as POST parameters to the called task.
* </ul>
*
* <h3>Patharg Reference</h3>
*
* <p>The following values may be specified inside the "endpoint" param.
* <ul>
* <li>{@code :tld} Substituted with an ASCII tld, if tld fanout is enabled.
* This patharg is mostly useful for aesthetic purposes, since tasks are already namespaced.
* </ul>
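*
* <p>Example invocation (hypothetical parameter values):
* {@code /_dr/cron/fanout?queue=my-queue&endpoint=/_dr/task/myTask/:tld&forEachRealTld=true&jitterSeconds=60}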
*/
@Action(path = "/_dr/cron/fanout", automaticallyPrintOk = true)
public final class TldFanoutAction implements Runnable {
private static final String ENDPOINT_PARAM = "endpoint";
private static final String QUEUE_PARAM = "queue";
private static final String FOR_EACH_REAL_TLD_PARAM = "forEachRealTld";
private static final String FOR_EACH_TEST_TLD_PARAM = "forEachTestTld";
private static final String RUN_IN_EMPTY_PARAM = "runInEmpty";
private static final String EXCLUDE_PARAM = "exclude";
private static final String JITTER_SECONDS_PARAM = "jitterSeconds";
/** A set of control params to TldFanoutAction that aren't passed down to the executing action. */
private static final Set<String> CONTROL_PARAMS = ImmutableSet.of(
ENDPOINT_PARAM,
QUEUE_PARAM,
FOR_EACH_REAL_TLD_PARAM,
FOR_EACH_TEST_TLD_PARAM,
RUN_IN_EMPTY_PARAM,
EXCLUDE_PARAM,
JITTER_SECONDS_PARAM);
private static final String TLD_PATHARG = ":tld";
private static final Random random = new Random();
@Inject TaskEnqueuer taskEnqueuer;
@Inject @Parameter(ENDPOINT_PARAM) String endpoint;
@Inject @Parameter(QUEUE_PARAM) String queue;
@Inject @Parameter(FOR_EACH_REAL_TLD_PARAM) boolean forEachRealTld;
@Inject @Parameter(FOR_EACH_TEST_TLD_PARAM) boolean forEachTestTld;
@Inject @Parameter(RUN_IN_EMPTY_PARAM) boolean runInEmpty;
@Inject @Parameter(EXCLUDE_PARAM) ImmutableSet<String> excludes;
@Inject @Parameter(JITTER_SECONDS_PARAM) Optional<Integer> jitterSeconds;
@Inject @ParameterMap ImmutableListMultimap<String, String> params;
@Inject TldFanoutAction() {}
@Override
public void run() {
Set<String> namespaces = ImmutableSet.copyOf(concat(
runInEmpty ? ImmutableSet.of("") : ImmutableSet.<String>of(),
forEachRealTld ? getTldsOfType(REAL) : ImmutableSet.<String>of(),
forEachTestTld ? getTldsOfType(TEST) : ImmutableSet.<String>of()));
Multimap<String, String> flowThruParams = filterKeys(params, not(in(CONTROL_PARAMS)));
Queue taskQueue = getQueue(queue);
for (String namespace : difference(namespaces, excludes)) {
taskEnqueuer.enqueue(taskQueue, createTaskOptions(namespace, flowThruParams));
}
}
private TaskOptions createTaskOptions(String tld, Multimap<String, String> params) {
TaskOptions options =
withUrl(endpoint.replace(TLD_PATHARG, String.valueOf(tld)))
.countdownMillis(
jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0);
options.param(RequestParameters.PARAM_TLD, tld);
for (String param : params.keySet()) {
// TaskOptions.param() does not accept null values.
options.param(param, nullToEmpty((getFirst(params.get(param), null))));
}
return options;
}
}

View file

@ -0,0 +1,16 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.cron;

View file

@ -0,0 +1,41 @@
# Description:
# Routines to publish authoritative DNS.
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "constants",
srcs = ["DnsConstants.java"],
)
java_library(
name = "dns",
srcs = glob(
["*.java"],
exclude = ["DnsConstants.java"],
),
deps = [
":constants",
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/html",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/dns/writer/api",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -0,0 +1,35 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
/** Static class for DNS-related constants. */
public class DnsConstants {
private DnsConstants() {}
/** The name of the DNS pull queue. */
public static final String DNS_PULL_QUEUE_NAME = "dns-pull"; // See queue.xml.
/** The name of the DNS publish push queue. */
public static final String DNS_PUBLISH_PUSH_QUEUE_NAME = "dns-publish"; // See queue.xml.
/** The parameter to use for storing the target type ("domain" or "host" or "zone"). */
public static final String DNS_TARGET_TYPE_PARAM = "Target-Type";
/** The parameter to use for storing the target name (domain or host name) with the task. */
public static final String DNS_TARGET_NAME_PARAM = "Target-Name";
/** The possible values of the {@code DNS_TARGET_TYPE_PARAM} parameter. */
public enum TargetType { DOMAIN, HOST, ZONE }
}

View file

@ -0,0 +1,93 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.domain.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static com.google.domain.registry.dns.PublishDnsUpdatesAction.DOMAINS_PARAM;
import static com.google.domain.registry.dns.PublishDnsUpdatesAction.HOSTS_PARAM;
import static com.google.domain.registry.dns.ReadDnsQueueAction.KEEP_TASKS_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractBooleanParameter;
import static com.google.domain.registry.request.RequestParameters.extractEnumParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static com.google.domain.registry.request.RequestParameters.extractSetOfParameters;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.dns.writer.api.DnsWriterZone;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import dagger.Module;
import dagger.Provides;
import java.util.Set;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the dns package. */
@Module
public final class DnsModule {
@Provides
@DnsWriterZone
static String provideZoneName(@Parameter(RequestParameters.PARAM_TLD) String tld) {
return tld;
}
@Provides
@Named(DNS_PULL_QUEUE_NAME)
static Queue provideDnsPullQueue() {
return QueueFactory.getQueue(DNS_PULL_QUEUE_NAME);
}
@Provides
@Named(DNS_PUBLISH_PUSH_QUEUE_NAME)
static Queue provideDnsUpdatePushQueue() {
return QueueFactory.getQueue(DNS_PUBLISH_PUSH_QUEUE_NAME);
}
@Provides
@Parameter(DOMAINS_PARAM)
static Set<String> provideDomains(HttpServletRequest req) {
return extractSetOfParameters(req, DOMAINS_PARAM);
}
@Provides
@Parameter(HOSTS_PARAM)
static Set<String> provideHosts(HttpServletRequest req) {
return extractSetOfParameters(req, HOSTS_PARAM);
}
@Provides
@Parameter(KEEP_TASKS_PARAM)
static boolean provideKeepTasks(HttpServletRequest req) {
return extractBooleanParameter(req, KEEP_TASKS_PARAM);
}
@Provides
@Parameter("name")
static String provideName(HttpServletRequest req) {
return extractRequiredParameter(req, "name");
}
@Provides
@Parameter("type")
static TargetType provideType(HttpServletRequest req) {
return extractEnumParameter(req, TargetType.class, "type");
}
}

View file

@ -0,0 +1,161 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.domain.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.domain.DomainUtils.getTldFromDomainName;
import static com.google.domain.registry.model.registry.Registries.assertTldExists;
import static com.google.domain.registry.request.RequestParameters.PARAM_TLD;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueConstants;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.apphosting.api.DeadlineExceededException;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.registry.Registries;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.Duration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
/** Methods for manipulating the queue used for DNS write tasks. */
public class DnsQueue {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject @Config("dnsWriteLockTimeout") Duration writeLockTimeout;
@Inject @Named(DNS_PULL_QUEUE_NAME) Queue queue;
@Inject DnsQueue() {}
long writeBatchSize = QueueConstants.maxLeaseCount();
/**
* Enqueues the given task type with the given target name to the DNS queue, tagged with the
* specified TLD.
*/
private TaskHandle addToQueue(TargetType targetType, String targetName, String tld) {
return queue.add(TaskOptions.Builder
// TODO(b/24564175): don't set the tag
.withTag(tld)
.method(Method.PULL)
.param(DNS_TARGET_TYPE_PARAM, targetType.toString())
.param(DNS_TARGET_NAME_PARAM, targetName)
.param(PARAM_TLD, tld));
}
/**
* Adds a task to the queue to refresh the DNS information for the specified subordinate host.
*/
public TaskHandle addHostRefreshTask(String fullyQualifiedHostName) {
Optional<InternetDomainName> tld =
Registries.findTldForName(InternetDomainName.from(fullyQualifiedHostName));
checkArgument(tld.isPresent(),
String.format("%s is not a subordinate host to a known tld", fullyQualifiedHostName));
return addToQueue(TargetType.HOST, fullyQualifiedHostName, tld.get().toString());
}
/** Adds a task to the queue to refresh the DNS information for the specified domain. */
public TaskHandle addDomainRefreshTask(String fullyQualifiedDomainName) {
return addToQueue(
TargetType.DOMAIN,
fullyQualifiedDomainName,
assertTldExists(getTldFromDomainName(fullyQualifiedDomainName)));
}
/** Adds a task to the queue to refresh the DNS information for the specified zone. */
public TaskHandle addZoneRefreshTask(String fullyQualifiedZoneName) {
return addToQueue(TargetType.ZONE, fullyQualifiedZoneName, fullyQualifiedZoneName);
}
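// Usage sketch (hypothetical caller): a flow that mutates a domain would typically follow up
// with DnsQueue.create().addDomainRefreshTask("label.example") so the zone is republished.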
/**
* Returns a batch of pending tasks.
*/
public List<TaskHandle> leaseTasks() {
return leaseTasks(null);
}
/**
* Returns a batch of pending tasks.
*
* @param tag the filter used to lease only those tasks that match
*/
public List<TaskHandle> leaseTasks(@Nullable String tag) {
try {
return isNullOrEmpty(tag)
? queue.leaseTasks(writeLockTimeout.getMillis(), MILLISECONDS, writeBatchSize)
: queue.leaseTasksByTag(writeLockTimeout.getMillis(), MILLISECONDS, writeBatchSize, tag);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed leasing tasks too fast");
return ImmutableList.of();
}
}
/** Reduce the task lease time to zero, making it immediately available to be leased again. */
public void dropTaskLease(TaskHandle task) {
try {
queue.modifyTaskLease(task, 0, TimeUnit.SECONDS);
} catch (IllegalStateException e) {
logger.warningfmt(e, "Failed dropping expired lease: %s", task.getName());
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed dropping task leases too fast");
}
}
/** Delete the task, removing it from the queue permanently. */
public void deleteTask(TaskHandle task) {
try {
queue.deleteTask(task);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed deleting tasks too fast");
}
}
/** Delete a list of tasks, removing them from the queue permanently. */
public void deleteTasks(List<TaskHandle> tasks) {
try {
queue.deleteTask(tasks);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed deleting tasks too fast");
}
}
// TODO(b/19483428): Remove me when flows package is ported to Dagger.
/** Creates a new instance. */
public static DnsQueue create() {
DnsQueue result = new DnsQueue();
result.writeLockTimeout = Duration.standardSeconds(120);
result.queue = QueueFactory.getQueue(DNS_PULL_QUEUE_NAME);
return result;
}
}

View file

@ -0,0 +1,98 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.domain.registry.model.server.Lock.executeWithLocks;
import static com.google.domain.registry.request.Action.Method.POST;
import static com.google.domain.registry.util.CollectionUtils.nullToEmpty;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.ServiceUnavailableException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.DomainNameUtils;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.Duration;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
import javax.inject.Provider;
/** Task that sends domain and host updates to the DNS server. */
@Action(path = PublishDnsUpdatesAction.PATH, method = POST, automaticallyPrintOk = true)
public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
public static final String PATH = "/_dr/task/publishDnsUpdates";
public static final String DOMAINS_PARAM = "domains";
public static final String HOSTS_PARAM = "hosts";
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject DnsQueue dnsQueue;
@Inject Provider<DnsWriter> writerProvider;
@Inject @Config("dnsWriteLockTimeout") Duration timeout;
@Inject @Parameter(RequestParameters.PARAM_TLD) String tld;
@Inject @Parameter(DOMAINS_PARAM) Set<String> domains;
@Inject @Parameter(HOSTS_PARAM) Set<String> hosts;
@Inject PublishDnsUpdatesAction() {}
/** Runs the task. */
@Override
public void run() {
String lockName = String.format("DNS zone %s", tld);
// If executeWithLocks fails to get the lock, it does not throw an exception; it simply returns
// false. We need to make sure to take note of this error; otherwise, a failed lock might result
// in the update task being dequeued and dropped. A message will already have been logged
// to indicate the problem.
if (!executeWithLocks(this, getClass(), tld, timeout, lockName)) {
throw new ServiceUnavailableException("Lock failure");
}
}
/** Runs the task, with the lock. */
@Override
public Void call() {
processBatch();
return null;
}
/** Steps through the domain and host refreshes contained in the parameters and processes them. */
private void processBatch() {
try (DnsWriter writer = writerProvider.get()) {
for (String domain : nullToEmpty(domains)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(domain), InternetDomainName.from(tld))) {
logger.severefmt("%s: skipping domain %s not under tld", tld, domain);
} else {
writer.publishDomain(domain);
}
}
for (String host : nullToEmpty(hosts)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(host), InternetDomainName.from(tld))) {
logger.severefmt("%s: skipping host %s not under tld", tld, host);
} else {
writer.publishHost(host);
}
}
}
}
}

View file

@ -0,0 +1,204 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.collect.Sets.difference;
import static com.google.domain.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.registry.Registries.getTlds;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.TreeMultimap;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import java.io.UnsupportedEncodingException;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Named;
/**
* Action for fanning out DNS refresh tasks by TLD, using data taken from the DNS pull queue.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* <li>{@code keepTasks} Do not delete any tasks from the pull queue, whether they are processed or
* not.
* </ul>
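*
* <p>Example invocation (hypothetical parameter values):
* {@code /_dr/cron/readDnsQueue?jitterSeconds=30&keepTasks=true}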
*/
@Action(path = "/_dr/cron/readDnsQueue", automaticallyPrintOk = true)
public final class ReadDnsQueueAction implements Runnable {
public static final String KEEP_TASKS_PARAM = "keepTasks";
private static final String JITTER_SECONDS_PARAM = "jitterSeconds";
private static final Random random = new Random();
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject @Config("dnsTldUpdateBatchSize") int tldUpdateBatchSize;
@Inject @Named(DNS_PUBLISH_PUSH_QUEUE_NAME) Queue dnsPublishPushQueue;
@Inject @Parameter(JITTER_SECONDS_PARAM) Optional<Integer> jitterSeconds;
@Inject @Parameter(KEEP_TASKS_PARAM) boolean keepTasks;
@Inject DnsQueue dnsQueue;
@Inject TaskEnqueuer taskEnqueuer;
@Inject ReadDnsQueueAction() {}
/** Container for items we pull out of the DNS pull queue and process for fanout. */
private class RefreshItem implements Comparable<RefreshItem> {
final TargetType type;
final String name;
public RefreshItem(final TargetType type, final String name) {
this.type = type;
this.name = name;
}
@Override
public int compareTo(RefreshItem other) {
return ComparisonChain.start()
.compare(this.type, other.type)
.compare(this.name, other.name)
.result();
}
}
/** Leases all tasks from the pull queue and creates per-tld update actions for them. */
@Override
public void run() {
Set<String> tldsOfInterest = getTlds();
List<TaskHandle> tasks = dnsQueue.leaseTasks();
if (tasks.isEmpty()) {
return;
}
logger.infofmt("leased %d tasks", tasks.size());
// Normally, all tasks will be deleted from the pull queue. But some might have to remain if
// we are not interested in the associated TLD, or if the TLD is paused. Keep track of which
// ones those are.
Set<TaskHandle> tasksToKeep = new HashSet<>();
// The paused TLDs for which we found at least one refresh request.
Set<String> pausedTlds = new HashSet<>();
// Create a sorted multimap into which we will insert the refresh items, so that the items for
// each TLD will be grouped together, and domains and hosts will be grouped within a TLD. The
// grouping and ordering of domains and hosts is not technically necessary, but a predictable
// ordering makes it possible to write detailed tests.
TreeMultimap<String, RefreshItem> refreshItemMultimap = TreeMultimap.create();
// Read all tasks on the DNS pull queue and load them into the refresh item multimap.
for (TaskHandle task : tasks) {
try {
Map<String, String> params = ImmutableMap.copyOf(task.extractParams());
// Dual-read the TLD from either the parameter (new methodology) or the tag (old way).
// TODO(b/24564175): get the TLD from the regular parameter only.
String tld = task.getTag();
if (tld == null) {
tld = params.get(RequestParameters.PARAM_TLD);
}
if (tld == null) {
logger.severe("discarding invalid DNS refresh request; no TLD specified");
} else if (!tldsOfInterest.contains(tld)) {
tasksToKeep.add(task);
} else if (Registry.get(tld).getDnsPaused()) {
tasksToKeep.add(task);
pausedTlds.add(tld);
} else {
String typeString = params.get(DNS_TARGET_TYPE_PARAM);
String name = params.get(DNS_TARGET_NAME_PARAM);
if (typeString == null) {
logger.severe("discarding invalid DNS refresh request; no type specified");
} else if (name == null) {
logger.severe("discarding invalid DNS refresh request; no name specified");
} else {
TargetType type = TargetType.valueOf(typeString);
switch (type) {
case DOMAIN:
case HOST:
refreshItemMultimap.put(tld, new RefreshItem(type, name));
break;
default:
logger.severefmt("discarding DNS refresh request of type %s", typeString);
break;
}
}
}
} catch (UnsupportedEncodingException e) {
logger.severefmt(e, "discarding invalid DNS refresh request (task %s)", task);
}
}
if (!pausedTlds.isEmpty()) {
logger.infofmt("the dns-pull queue is paused for tlds: %s", pausedTlds);
}
// Loop through the multimap by TLD and generate refresh tasks for the hosts and domains.
for (Map.Entry<String, Collection<RefreshItem>> tldRefreshItemsEntry
: refreshItemMultimap.asMap().entrySet()) {
for (List<RefreshItem> chunk : Iterables.partition(
tldRefreshItemsEntry.getValue(), tldUpdateBatchSize)) {
TaskOptions options = withUrl(PublishDnsUpdatesAction.PATH)
.countdownMillis(jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0)
.param(RequestParameters.PARAM_TLD, tldRefreshItemsEntry.getKey());
for (RefreshItem refreshItem : chunk) {
options.param(
(refreshItem.type == TargetType.HOST)
? PublishDnsUpdatesAction.HOSTS_PARAM : PublishDnsUpdatesAction.DOMAINS_PARAM,
refreshItem.name);
}
taskEnqueuer.enqueue(dnsPublishPushQueue, options);
}
}
Set<TaskHandle> tasksToDelete = difference(ImmutableSet.copyOf(tasks), tasksToKeep);
// In keepTasks mode, never delete any tasks.
if (keepTasks) {
logger.infofmt("would have deleted %d tasks", tasksToDelete.size());
for (TaskHandle task : tasks) {
dnsQueue.dropTaskLease(task);
}
// Otherwise, either delete or drop the lease of each task.
} else {
logger.infofmt("deleting %d tasks", tasksToDelete.size());
dnsQueue.deleteTasks(ImmutableList.copyOf(tasksToDelete));
logger.infofmt("dropping %d tasks", tasksToKeep.size());
for (TaskHandle task : tasksToKeep) {
dnsQueue.dropTaskLease(task);
}
logger.infofmt("done");
}
}
}

View file

@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;

import static com.google.domain.registry.model.EppResourceUtils.loadByUniqueId;

import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.EppResource;
import com.google.domain.registry.model.domain.DomainResource;
import com.google.domain.registry.model.host.HostResource;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.NotFoundException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;

import javax.inject.Inject;

/** Endpoint for manually triggering refresh of DNS information. */
@Action(path = "/_dr/dnsRefresh", automaticallyPrintOk = true)
public final class RefreshDns implements Runnable {
@Inject Clock clock;
@Inject DnsQueue dnsQueue;
@Inject @Parameter("name") String domainOrHostName;
@Inject @Parameter("type") TargetType type;
@Inject RefreshDns() {}
@Override
public void run() {
if (!domainOrHostName.contains(".")) {
throw new BadRequestException("URL parameter 'name' must be fully qualified");
}
boolean domainLookup;
Class<? extends EppResource> clazz;
switch (type) {
case DOMAIN:
domainLookup = true;
clazz = DomainResource.class;
break;
case HOST:
domainLookup = false;
clazz = HostResource.class;
break;
default:
throw new BadRequestException("Unsupported type: " + type);
}
EppResource eppResource = loadByUniqueId(clazz, domainOrHostName, clock.nowUtc());
if (eppResource == null) {
throw new NotFoundException(
String.format("%s %s not found", type, domainOrHostName));
}
if (domainLookup) {
dnsQueue.addDomainRefreshTask(domainOrHostName);
} else {
if (((HostResource) eppResource).getSuperordinateDomain() == null) {
throw new BadRequestException(
String.format("%s isn't a subordinate hostname", domainOrHostName));
} else {
// Don't enqueue host refresh tasks for external hosts.
dnsQueue.addHostRefreshTask(domainOrHostName);
}
}
}
}
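
For reference, a minimal sketch of invoking this endpoint by hand. The hostname below is hypothetical, the type/name query parameters mirror the injected @Parameter fields above, and web.xml restricts /_dr/* paths to project admins, so an unauthenticated call like this would be rejected in production.

import java.net.HttpURLConnection;
import java.net.URL;

/** Hedged usage sketch; not part of this commit. */
class RefreshDnsExample {
  public static void main(String[] args) throws Exception {
    URL url = new URL("https://example-registry.appspot.com"
        + "/_dr/dnsRefresh?type=DOMAIN&name=foo.tld");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // 200 means a refresh task was enqueued; 404 means the domain or host
    // was not found; 400 means the name was not fully qualified.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}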

View file

@ -0,0 +1,158 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.server.Lock.executeWithLocks;
import static com.google.domain.registry.request.Action.Method.POST;

import com.google.appengine.api.LifecycleManager;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.common.base.Throwables;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.DomainNameUtils;
import com.google.domain.registry.util.FormattingLogger;

import org.joda.time.Duration;

import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;

import javax.inject.Inject;
import javax.inject.Provider;

/** Task that consumes pull-queue for zone updates to write to the DNS server. */
@Action(path = "/_dr/task/writeDns", method = POST, automaticallyPrintOk = true)
public final class WriteDnsTask implements Runnable, Callable<Void> {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject DnsQueue dnsQueue;
@Inject Provider<DnsWriter> writerProvider;
@Inject @Config("dnsWriteLockTimeout") Duration timeout;
@Inject @Parameter(RequestParameters.PARAM_TLD) String tld;
@Inject WriteDnsTask() {}
/** Runs the task. */
@Override
public void run() {
String lockName = String.format("DNS zone %s", tld);
executeWithLocks(this, getClass(), tld, timeout, lockName);
}
/** Runs the task, with the lock. */
@Override
public Void call() {
processBatch();
return null;
}
/** Leases a batch of tasks tagged with the zone name from the pull queue and processes them. */
private void processBatch() {
if (LifecycleManager.getInstance().isShuttingDown()) {
logger.infofmt("%s: lifecycle manager is shutting down", tld);
return;
}
if (Registry.get(tld).getDnsPaused()) {
logger.infofmt("%s: the dns-pull queue is paused", tld);
return;
}
// Make a defensive copy to allow mutations.
List<TaskHandle> tasks = new ArrayList<>(dnsQueue.leaseTasks(tld));
if (tasks.isEmpty()) {
logger.infofmt("%s: no tasks in the dns-pull queue", tld);
return;
}
try (DnsWriter writer = writerProvider.get()) {
Iterator<TaskHandle> it = tasks.iterator();
while (it.hasNext()) {
TaskHandle task = it.next();
try {
processTask(writer, task, tld);
} catch (UnsupportedOperationException e) {
// Handle fatal errors by deleting the task.
logger.severefmt(e, "%s: deleting unsupported task %s", tld, task.toString());
dnsQueue.deleteTask(task);
it.remove();
}
}
} catch (RuntimeException e) {
Throwables.propagateIfInstanceOf(e, HttpException.class);
// Handle transient errors by dropping the task leases.
logger.severefmt(e, "%s: dropping leases of failed tasks", tld);
for (TaskHandle task : tasks) {
dnsQueue.dropTaskLease(task);
}
return;
}
for (TaskHandle task : tasks) {
dnsQueue.deleteTask(task);
}
logger.infofmt("%s: batch of %s tasks processed", tld, tasks.size());
}
/** Stages a write to authoritative DNS for this task. */
private static void processTask(DnsWriter writer, TaskHandle task, String tld) {
Map<String, String> params = new HashMap<>();
try {
for (Map.Entry<String, String> entry : task.extractParams()) {
params.put(entry.getKey(), entry.getValue());
}
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
TargetType type = TargetType.valueOf(params.get(DNS_TARGET_TYPE_PARAM));
String name = checkNotNull(params.get(DNS_TARGET_NAME_PARAM));
switch (type) {
case DOMAIN:
checkRequestArgument(
DomainNameUtils.isUnder(InternetDomainName.from(name), InternetDomainName.from(tld)),
"domain name %s is not under tld %s", name, tld);
writer.publishDomain(name);
break;
case HOST:
checkRequestArgument(
DomainNameUtils.isUnder(InternetDomainName.from(name), InternetDomainName.from(tld)),
"host name %s is not under tld %s", name, tld);
writer.publishHost(name);
break;
default:
// TODO(b/11592394): Write a full zone.
throw new UnsupportedOperationException(String.format("unexpected type: %s", type));
}
}
private static void checkRequestArgument(boolean condition, String format, Object... args) {
if (!condition) {
throw new BadRequestException(String.format(format, args));
}
}
}

View file

@ -0,0 +1,15 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

java_library(
name = "api",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/base",
"//third_party/java/dagger",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

View file

@ -0,0 +1,48 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;

/**
* Transaction object for sending an atomic batch of updates for a single zone to the DNS server.
*
* <p>Here's an example of how you would publish updates for a domain and host:
* <pre>
* &#064;Inject Provider&lt;DnsWriter&gt; dnsWriter;
* try (DnsWriter writer = dnsWriter.get()) {
* writer.publishDomain(domainName);
* writer.publishHost(hostName);
* }
* </pre>
*/
public interface DnsWriter extends AutoCloseable {
/**
* Loads {@code domainName} from datastore and publishes its NS/DS records to the DNS server.
*
* @param domainName the fully qualified domain name
*/
void publishDomain(String domainName);
/**
* Loads {@code hostName} from datastore and publishes its A/AAAA glue records to the DNS server.
*
* @param hostName the fully qualified host name
*/
void publishHost(String hostName);
/** Commits the updates to the DNS server atomically. */
@Override
void close();
}
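
To make the batching contract concrete, here is a minimal sketch of an implementation, assuming a hypothetical ZoneBackend client: the publish methods only stage names, and everything is sent in a single commit when close() runs.

package com.google.domain.registry.dns.writer.api;

import java.util.HashSet;
import java.util.Set;

/** Hedged sketch of a DnsWriter; ZoneBackend is a hypothetical DNS client. */
final class SketchDnsWriter implements DnsWriter {

  interface ZoneBackend {
    void commit(Set<String> domains, Set<String> hosts);
  }

  private final ZoneBackend backend;
  private final Set<String> domains = new HashSet<>();
  private final Set<String> hosts = new HashSet<>();

  SketchDnsWriter(ZoneBackend backend) {
    this.backend = backend;
  }

  @Override
  public void publishDomain(String domainName) {
    domains.add(domainName);  // stage only; nothing is sent yet
  }

  @Override
  public void publishHost(String hostName) {
    hosts.add(hostName);  // stage only; nothing is sent yet
  }

  @Override
  public void close() {
    backend.commit(domains, hosts);  // one atomic commit for the whole batch
  }
}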

View file

@ -0,0 +1,24 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;

import java.lang.annotation.Documented;

import javax.inject.Qualifier;

/** Dagger qualifier for the fully-qualified zone name that's being updated. */
@Qualifier
@Documented
public @interface DnsWriterZone {}
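
A hedged illustration of how the qualifier would be bound and consumed; the module and zone value below are illustrative, not part of this commit.

package com.google.domain.registry.dns.writer.api;

import dagger.Module;
import dagger.Provides;

/** Illustrative module; a real binding would come from configuration. */
@Module
final class ExampleZoneModule {
  @Provides
  @DnsWriterZone
  static String provideZoneName() {
    return "tld.";  // hypothetical fully-qualified zone name
  }
}

A DnsWriter implementation could then receive the qualified value via constructor injection, e.g. @Inject SomeDnsWriter(@DnsWriterZone String zoneName).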

View file

@ -0,0 +1,49 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;

import com.google.common.base.Joiner;

import java.util.HashSet;
import java.util.Set;
import java.util.logging.Logger;

/**
* {@link DnsWriter} that doesn't actually update records in a DNS server.
*
* <p>All this class does is write its displeasure to the logs.
*/
public final class VoidDnsWriter implements DnsWriter {
private static final Logger logger = Logger.getLogger(VoidDnsWriter.class.getName());
private final Set<String> names = new HashSet<>();
@Override
public void publishDomain(String domainName) {
names.add(domainName);
}
@Override
public void publishHost(String hostName) {
names.add(hostName);
}
@Override
public void close() {
logger.warning("Ignoring DNS zone updates! No DnsWriterFactory implementation specified!\n"
+ Joiner.on('\n').join(names));
}
}

View file

@ -0,0 +1,28 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;

import dagger.Module;
import dagger.Provides;

/** Dagger module that disables DNS updates. */
@Module
public final class VoidDnsWriterModule {
@Provides
static DnsWriter provideDnsWriter() {
return new VoidDnsWriter();
}
}
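
A minimal sketch of wiring this module into a Dagger component, using only classes from this package; the component name is hypothetical.

package com.google.domain.registry.dns.writer.api;

import dagger.Component;

/** Hypothetical component sourcing its DnsWriter from VoidDnsWriterModule. */
@Component(modules = VoidDnsWriterModule.class)
interface ExampleDnsComponent {
  DnsWriter dnsWriter();
}

Dagger then generates DaggerExampleDnsComponent, so DaggerExampleDnsComponent.create().dnsWriter() yields a VoidDnsWriter that simply logs whatever was published when it is closed.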

View file

@ -0,0 +1,4 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,5 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,4 @@
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<appengine-application xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
</appengine-application>

View file

@ -0,0 +1,34 @@
<?xml version="1.0"
encoding="UTF-8"?>
<application
xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/application_5.xsd"
version="5">
<description>Google Registry</description>
<display-name>Google Registry</display-name>
<!-- Modules -->
<!-- The default module should be listed first -->
<module>
<web>
<web-uri>default</web-uri>
<context-root>default</context-root>
</web>
</module>
<module>
<web>
<web-uri>backend</web-uri>
<context-root>backend</context-root>
</web>
</module>
<module>
<web>
<web-uri>tools</web-uri>
<context-root>tools</context-root>
</web>
</module>
</application>

View file

@ -0,0 +1,7 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

exports_files(glob(["WEB-INF/*"]))

View file

@ -0,0 +1,13 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -0,0 +1,499 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<!-- RDE -->
<servlet>
<description>
Responsible for scanning the database to create a full deposit for a single TLD
and streaming it to cloud storage. Requests are sent here by App Engine after
`RdeCreateCronServlet` enqueues a task specifying a URL that points to this servlet.
</description>
<display-name>RDE Staging Task</display-name>
<servlet-name>rdeStagingTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>rdeStagingTask</servlet-name>
<url-pattern>/_dr/task/rdeStaging</url-pattern>
</servlet-mapping>
<servlet>
<description>
Once `rdeCreateFullCron` finishes writing a deposit file to cloud storage, it'll
launch this task with the cloud filename so it can be uploaded to Iron Mountain
via SFTP. The file is deleted once the upload completes. This should be run via
`rde-upload-backend`.
</description>
<display-name>RDE Upload Task</display-name>
<servlet-name>rdeUploadTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>rdeUploadTask</servlet-name>
<url-pattern>/_dr/task/rdeUpload</url-pattern>
</servlet-mapping>
<servlet>
<description>
Sends an XML RDE report to ICANN's HTTP server after rdeUploadTask finishes.
</description>
<display-name>RDE Report Task</display-name>
<servlet-name>rdeReportTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>rdeReportTask</servlet-name>
<url-pattern>/_dr/task/rdeReport</url-pattern>
</servlet-mapping>
<servlet>
<description>
Bulk Registration Data Access. This task creates a thin escrow deposit
and saves it to cloud storage, where a separate script owned by the SREs
uploads it to ICANN.
</description>
<display-name>BRDA Copy Task</display-name>
<servlet-name>brdaCopyTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>brdaCopyTask</servlet-name>
<url-pattern>/_dr/task/brdaCopy</url-pattern>
</servlet-mapping>
<!-- Trademark Clearinghouse -->
<servlet>
<description>Downloads TMCH DNL data from MarksDB.</description>
<display-name>DNL Downloader</display-name>
<servlet-name>tmchDnl</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>tmchDnl</servlet-name>
<url-pattern>/_dr/task/tmchDnl</url-pattern>
</servlet-mapping>
<servlet>
<description>Downloads TMCH SMDRL data from MarksDB.</description>
<display-name>SMDRL Downloader</display-name>
<servlet-name>tmchSmdrl</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>tmchSmdrl</servlet-name>
<url-pattern>/_dr/task/tmchSmdrl</url-pattern>
</servlet-mapping>
<servlet>
<description>Downloads TMCH CRL data from MarksDB.</description>
<display-name>CRL Downloader</display-name>
<servlet-name>tmchCrl</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>tmchCrl</servlet-name>
<url-pattern>/_dr/task/tmchCrl</url-pattern>
</servlet-mapping>
<servlet>
<description>
Reads the LORDN queues and uploads CSV data for sunrise and claims marks to MarksDB.
</description>
<display-name>NORDN Upload Task</display-name>
<servlet-name>nordnUploadTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>nordnUploadTask</servlet-name>
<url-pattern>/_dr/task/nordnUpload</url-pattern>
</servlet-mapping>
<servlet>
<description>Verifies upload of LORDN data to MarksDB.</description>
<display-name> NORDN Verify Task</display-name>
<servlet-name>nordnVerifyTask</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>nordnVerifyTask</servlet-name>
<url-pattern>/_dr/task/nordnVerify</url-pattern>
</servlet-mapping>
<!-- TODO(b/24564175): Remove this entry. -->
<servlet>
<description>Write DNS updates.</description>
<display-name>WriteDnsServlet</display-name>
<servlet-name>writeDns</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>writeDns</servlet-name>
<url-pattern>/_dr/task/writeDns</url-pattern>
</servlet-mapping>
<servlet>
<description>
Read the DNS push and pull queues and kick off the appropriate tasks to update zone.</description>
<display-name>Read DNS Queue</display-name>
<servlet-name>readDnsQueue</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>readDnsQueue</servlet-name>
<url-pattern>/_dr/cron/readDnsQueue</url-pattern>
</servlet-mapping>
<servlet>
<description>Publish DNS updates.</description>
<display-name>Publish DNS Updates</display-name>
<servlet-name>publishDnsUpdates</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>publishDnsUpdates</servlet-name>
<url-pattern>/_dr/task/publishDnsUpdates</url-pattern>
</servlet-mapping>
<servlet>
<description>
Endpoint to manually refresh DNS information.
</description>
<display-name>DNS Refresh</display-name>
<servlet-name>dns-refresh</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>dns-refresh</servlet-name>
<url-pattern>/_dr/dnsRefresh</url-pattern>
</servlet-mapping>
<servlet>
<description>Export a datastore backup snapshot to GCS.</description>
<display-name>Export snapshot to GCS</display-name>
<servlet-name>exportSnapshot</servlet-name>
<servlet-class>com.google.domain.registry.export.ExportSnapshotServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>exportSnapshot</servlet-name>
<url-pattern>/_dr/task/exportSnapshot</url-pattern>
</servlet-mapping>
<servlet>
<description>Check the completion of a datastore backup snapshot.</description>
<display-name>Check on snapshot status</display-name>
<servlet-name>checkSnapshot</servlet-name>
<servlet-class>com.google.domain.registry.export.CheckSnapshotServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>checkSnapshot</servlet-name>
<url-pattern>/_dr/task/checkSnapshot</url-pattern>
</servlet-mapping>
<servlet>
<description>Load a datastore backup snapshot into BigQuery.</description>
<display-name>Load snapshot into BigQuery</display-name>
<servlet-name>loadSnapshot</servlet-name>
<servlet-class>com.google.domain.registry.export.LoadSnapshotServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>loadSnapshot</servlet-name>
<url-pattern>/_dr/task/loadSnapshot</url-pattern>
</servlet-mapping>
<servlet>
<description>Update a view to point at a certain snapshot in BigQuery.</description>
<display-name>Update snapshot view in BigQuery</display-name>
<servlet-name>updateSnapshotView</servlet-name>
<servlet-class>com.google.domain.registry.export.UpdateSnapshotViewServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>updateSnapshotView</servlet-name>
<url-pattern>/_dr/task/updateSnapshotView</url-pattern>
</servlet-mapping>
<servlet>
<description>Polls state of jobs in Bigquery</description>
<display-name>Bigquery Job Poll Task</display-name>
<servlet-name>poll-bigquery-job</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>poll-bigquery-job</servlet-name>
<url-pattern>/_dr/task/pollBigqueryJob</url-pattern>
</servlet-mapping>
<servlet>
<description>Fan out a cron task over an adjustable range of TLDs.</description>
<display-name>TLD Cron Fanout</display-name>
<servlet-name>tld-fanout</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>tld-fanout</servlet-name>
<url-pattern>/_dr/cron/fanout</url-pattern>
</servlet-mapping>
<!-- Backups. -->
<servlet>
<description>Fan out a cron task over all commit log buckets.</description>
<display-name>Commit Log Bucket Cron Fanout</display-name>
<servlet-name>commit-log-fanout</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>commit-log-fanout</servlet-name>
<url-pattern>/_dr/cron/commitLogFanout</url-pattern>
</servlet-mapping>
<servlet>
<description>Deletes old commit logs from datastore.</description>
<display-name>Delete Old Commit Logs</display-name>
<servlet-name>delete-commits</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>delete-commits</servlet-name>
<url-pattern>/_dr/task/deleteOldCommitLogs</url-pattern>
</servlet-mapping>
<servlet>
<description>Checkpoint commit logs.</description>
<servlet-name>checkpoint-commit-logs</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>checkpoint-commit-logs</servlet-name>
<url-pattern>/_dr/cron/commitLogCheckpoint</url-pattern>
</servlet-mapping>
<servlet>
<description>Export commit log diff.</description>
<servlet-name>export-commit-log-diff</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>export-commit-log-diff</servlet-name>
<url-pattern>/_dr/task/exportCommitLogDiff</url-pattern>
</servlet-mapping>
<servlet>
<description>Delete EppResources, children, and indices.</description>
<servlet-name>kill-epp-resources</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>kill-epp-resources</servlet-name>
<url-pattern>/_dr/task/killAllEppResources</url-pattern>
</servlet-mapping>
<servlet>
<description>Restore commit logs.</description>
<servlet-name>restore-commit-logs</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>restore-commit-logs</servlet-name>
<url-pattern>/_dr/task/restoreCommitLogs</url-pattern>
</servlet-mapping>
<servlet>
<description>Delete commit logs.</description>
<servlet-name>kill-commit-logs</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>kill-commit-logs</servlet-name>
<url-pattern>/_dr/task/killCommitLogs</url-pattern>
</servlet-mapping>
<!-- MapReduce servlet. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>
<servlet-class>com.google.appengine.tools.pipeline.impl.servlets.PipelineServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>pipeline</servlet-name>
<url-pattern>/_ah/pipeline/*</url-pattern>
</servlet-mapping>
<!-- Synchronize registrars spreadsheet task. -->
<servlet>
<servlet-name>sync-registrars-sheet</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>sync-registrars-sheet</servlet-name>
<url-pattern>/_dr/task/syncRegistrarsSheet</url-pattern>
</servlet-mapping>
<!-- TLD reserved terms exporting servlet. -->
<servlet>
<servlet-name>export-reserved-terms</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>export-reserved-terms</servlet-name>
<url-pattern>/_dr/task/exportReservedTerms</url-pattern>
</servlet-mapping>
<!-- RegistrarContact changes syncing to Google Groups servlet. -->
<servlet>
<servlet-name>sync-group-members</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>sync-group-members</servlet-name>
<url-pattern>/_dr/task/syncGroupMembers</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete the specified contact resource if it is not referenced by any domains. -->
<servlet>
<description>
Deletes the specified contact resource if it is not referenced by any domains.
</description>
<display-name>Delete Contact Resource</display-name>
<servlet-name>delete-contact-resource</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>delete-contact-resource</servlet-name>
<url-pattern>/_dr/task/deleteContactResource</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete the specified host resource if it is not referenced by any domains. -->
<servlet>
<description>
Deletes the specified host resource if it is not referenced by any domains.
</description>
<display-name>Delete Host Resource</display-name>
<servlet-name>delete-host-resource</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>delete-host-resource</servlet-name>
<url-pattern>/_dr/task/deleteHostResource</url-pattern>
</servlet-mapping>
<!-- Mapreduce to enqueue DNS update tasks following a host rename. -->
<servlet>
<description>
Enqueues DNS update tasks following a host rename
</description>
<display-name>DNS Refresh For Host Rename</display-name>
<servlet-name>dns-refresh-for-host-rename</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>dns-refresh-for-host-rename</servlet-name>
<url-pattern>/_dr/task/dnsRefreshForHostRename</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Any request path starting with `/_dr/` will be restricted to requests originating
from the backend or by anyone authenticated to a Google account that's listed in
the AppEngine control panel settings for this project as a Viewer/Owner/Developer.
The `_dr` is short for Domain Registry to follow AppEngine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Shut down external access to registrar console. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Go home rogue registrar!
</description>
<url-pattern>/registrar*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- These are only included in the default module war, but restricting here too for safety. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -0,0 +1,7 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

exports_files(glob(["WEB-INF/*"]))

View file

@ -0,0 +1,136 @@
<datastore-indexes autoGenerate="false">
<!-- For finding contact resources by registrar. -->
<datastore-index kind="ContactResource" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="ContactResource" ancestor="false" source="manual">
<property name="sharedFields.currentSponsorClientId" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by registrar. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="sharedFields.currentSponsorClientId" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by tld. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="tld" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="tld" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by registrar. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="sharedFields.currentSponsorClientId" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding host resources by registrar. -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="sharedFields.currentSponsorClientId" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding account balance of Registrar and viewing billing history. -->
<datastore-index kind="RegistrarBillingEntry" ancestor="true" source="manual">
<property name="currency" direction="asc"/>
<property name="created" direction="desc"/>
</datastore-index>
<!-- For determining the active domains linked to a given contact. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="allContacts.contactId.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="allContacts.contactId.linked" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For determining the active domains linked to a given host. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="nameservers.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="nameservers.linked" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For updating domains and applications after a host rename. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="nameservers.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="nameservers.linked" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For Whois ip lookup -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="inetAddresses" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="inetAddresses" direction="asc"/>
<property name="sharedFields.deletionTime" direction="asc"/>
</datastore-index>
<!-- For Poll -->
<datastore-index kind="PollMessage" ancestor="false" source="manual">
<property name="clientId" direction="asc"/>
<property name="eventTime" direction="asc"/>
</datastore-index>
<datastore-index kind="PollMessage" ancestor="true" source="manual">
<property name="clientId" direction="asc"/>
<property name="eventTime" direction="asc"/>
</datastore-index>
<!-- For the history viewer. -->
<datastore-index kind="HistoryEntry" ancestor="true" source="manual">
<property name="modificationTime" direction="asc"/>
</datastore-index>
<!-- For RDAP. -->
<!-- TODO(b/25644770): Backfill these indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="fullyQualifiedDomainName" direction="asc"/>
</datastore-index>
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="tld" direction="asc"/>
<property name="fullyQualifiedDomainName" direction="asc"/>
</datastore-index>
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="deletionTime" direction="asc"/>
<property name="fullyQualifiedHostName" direction="asc"/>
</datastore-index>
<!-- TODO(b/19035583): Remove sharedFields indexes. -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="sharedFields.deletionTime" direction="asc"/>
<property name="fullyQualifiedHostName" direction="asc"/>
</datastore-index>
</datastore-indexes>

View file

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<blacklistentries>
<!-- Example IPv4 CIDR Subnet
<blacklist>
<subnet>1.2.3.4/24</subnet>
<description>An IPv4 subnet</description>
</blacklist> -->
<!-- Example IPv6 CIDR Subnet
<blacklist>
<subnet>abcd::123:4567/48</subnet>
<description>An IPv6 subnet</description>
</blacklist> -->
</blacklistentries>

View file

@ -0,0 +1,13 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -0,0 +1,288 @@
<?xml version="1.0" encoding="UTF-8"?>
<queue-entries>
<queue>
<name>default</name>
<rate>1/s</rate>
<bucket-size>5</bucket-size>
</queue>
<queue>
<name>dns-cron</name>
<!-- There is no point allowing more than 10/s because the pull queue that feeds
this job will refuse to service more than 10 qps. See
https://cloud.google.com/appengine/docs/java/javadoc/com/google/appengine/api/taskqueue/Queue#leaseTasks-long-java.util.concurrent.TimeUnit-long- -->
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>dns-pull</name>
<mode>pull</mode>
</queue>
<queue>
<name>dns-publish</name>
<rate>100/s</rate>
<bucket-size>100</bucket-size>
</queue>
<queue>
<name>export</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for launching asynchronous actions (e.g. mapreduces) from async flows. -->
<queue>
<name>flows-async</name>
<!-- Note: rate-limiting a bit here because each of these will kick off an MR.
TODO(b/26140521): do more intelligent/aggressive batching than this. -->
<rate>1/m</rate>
<!-- Async flow tasks should run on the backend module. -->
<target>backend</target>
</queue>
<queue>
<name>delete-commits</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>export-commits</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<!-- Retry aggressively since a single delayed export increases our time window of
unrecoverable data loss in the event of a datastore failure. -->
<min-backoff-seconds>1</min-backoff-seconds>
<max-backoff-seconds>60</max-backoff-seconds>
<!-- No age limit; a failed export should be retried as long as possible to avoid
having data missing from our exported commit log record. -->
</retry-parameters>
</queue>
<!-- Queue for jobs to export reserved terms to Google Drive for a TLD. -->
<queue>
<name>export-reserved-terms</name>
<rate>1/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>3</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for jobs to sync RegistrarContact changes to Google Groups. -->
<queue>
<name>group-members-sync</name>
<rate>1/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>3</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for polling export BigQuery jobs for completion. -->
<queue>
<name>export-bigquery-poll</name>
<!-- Limit queue to 5 concurrent tasks and 5 per second to avoid hitting BigQuery quotas. -->
<rate>5/s</rate>
<bucket-size>5</bucket-size>
<max-concurrent-requests>5</max-concurrent-requests>
<!-- Check every 20s and increase interval to every 5 minutes. -->
<retry-parameters>
<min-backoff-seconds>20</min-backoff-seconds>
<max-backoff-seconds>300</max-backoff-seconds>
<max-doublings>2</max-doublings>
</retry-parameters>
</queue>
<!-- Queue for launching new snapshots and for triggering the initial BigQuery load jobs. -->
<queue>
<name>export-snapshot</name>
<rate>5/m</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 5m interval and increasing up to a 30m interval. -->
<min-backoff-seconds>300</min-backoff-seconds>
<max-backoff-seconds>1800</max-backoff-seconds>
<task-retry-limit>10</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for polling managed backup snapshots for completion. -->
<queue>
<name>export-snapshot-poll</name>
<rate>5/m</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 1m interval and increasing up to a 5m interval. -->
<min-backoff-seconds>60</min-backoff-seconds>
<max-backoff-seconds>300</max-backoff-seconds>
</retry-parameters>
</queue>
<!-- Queue for updating BigQuery views after a snapshot kind's load job completes. -->
<queue>
<name>export-snapshot-update-view</name>
<rate>1/s</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 10s interval and increasing up to a 1m interval. -->
<min-backoff-seconds>10</min-backoff-seconds>
<max-backoff-seconds>60</max-backoff-seconds>
<task-retry-limit>10</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>mail</name>
<rate>5/m</rate>
<bucket-size>10</bucket-size>
</queue>
<queue>
<name>rde-upload</name>
<rate>1/m</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>rde-report</name>
<rate>1/s</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>rde-staging</name>
<rate>1/m</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>brda</name>
<rate>1/m</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
<task-age-limit>23h</task-age-limit>
</retry-parameters>
</queue>
<!-- Queue for tasks that communicate with TMCH MarksDB webserver. -->
<!-- TODO(b/17623181): Delete this once the queue implementation is live and working. -->
<queue>
<name>marksdb</name>
<rate>1/m</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
<task-age-limit>11h</task-age-limit> <!-- cron interval minus hour -->
</retry-parameters>
</queue>
<!-- Queue for tasks to produce LORDN CSV reports, either by by the query or queue method. -->
<queue>
<name>nordn</name>
<rate>1/s</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
<task-age-limit>11h</task-age-limit> <!-- cron interval minus hour -->
</retry-parameters>
</queue>
<!-- Queue for LORDN Claims CSV rows to be periodically queried and then uploaded in batches. -->
<queue>
<name>lordn-claims</name>
<mode>pull</mode>
</queue>
<!-- Queue for LORDN Sunrise CSV rows to be periodically queried and then uploaded in batches. -->
<queue>
<name>lordn-sunrise</name>
<mode>pull</mode>
</queue>
<!-- Queue used by the MapReduce library for running tasks.
Do not re-use this queue for tasks that our code creates (e.g. tasks to launch MapReduces
that aren't themselves part of a running MapReduce). -->
<queue>
<name>mapreduce</name>
<!-- Warning: DO NOT SET A <target> parameter for this queue. See b/24782801 for why. -->
<rate>500/s</rate>
<bucket-size>100</bucket-size>
</queue>
<!-- Queue for tasks that sync data to Google Spreadsheets. -->
<queue>
<name>sheet</name>
<rate>1/s</rate>
<!-- max-concurrent-requests is intentionally omitted. -->
<retry-parameters>
<task-age-limit>1h</task-age-limit>
</retry-parameters>
</queue>
<!-- queue for whitebox metrics -->
<queue>
<name>bigquery-streaming-metrics</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
<task-age-limit>1m</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>load0</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load1</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load2</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load3</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load4</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
</queue-entries>

View file

@ -0,0 +1,375 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<servlet>
<description>
This is the primary EPP endpoint for the Registry. It accepts
EPP XHRs from our TLS proxy.
</description>
<display-name>EPP</display-name>
<servlet-name>epp</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppTlsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>epp</servlet-name>
<url-pattern>/_dr/epp</url-pattern>
</servlet-mapping>
<!-- Registrar Console -->
<servlet>
<description>
Registrar Console UI servlet.
</description>
<display-name>Registrar Console UI</display-name>
<servlet-name>registrar-ui</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.registrar.ConsoleUiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-ui</servlet-name>
<url-pattern>/registrar</url-pattern>
</servlet-mapping>
<servlet>
<description>
Registrar Console XHR servlet. Accepts EPP XHRs from GAE GAIA-authenticated frontend sessions.
</description>
<display-name>Registrar Console XHR</display-name>
<servlet-name>registrar-xhr</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppConsoleServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-xhr</servlet-name>
<url-pattern>/registrar-xhr</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registrar Self-serve Settings</display-name>
<servlet-name>registrar-settings</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.registrar.RegistrarServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-settings</servlet-name>
<url-pattern>/registrar-settings</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registrar Braintree Payment Form Setup</display-name>
<servlet-name>registrar-payment-setup</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-payment-setup</servlet-name>
<url-pattern>/registrar-payment-setup</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registrar Braintree Payment</display-name>
<servlet-name>registrar-payment</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-payment</servlet-name>
<url-pattern>/registrar-payment</url-pattern>
</servlet-mapping>
<!-- Admin Console -->
<servlet>
<display-name>Admin UI Servlet</display-name>
<servlet-name>admin-ui</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.admin.AdminUiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>admin-ui</servlet-name>
<url-pattern>/_dr/admin</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registry Admin</display-name>
<servlet-name>registry-admin</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.admin.RegistryServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registry-admin</servlet-name>
<url-pattern>/_dr/admin/registry/*</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registrar Admin</display-name>
<servlet-name>registrar-admin</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.admin.RegistrarServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-admin</servlet-name>
<url-pattern>/_dr/admin/registrar/*</url-pattern>
</servlet-mapping>
<!-- WHOIS -->
<servlet>
<description>
HTTP WHOIS API.
</description>
<display-name>WHOIS</display-name>
<servlet-name>whois</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>whois</servlet-name>
<url-pattern>/whois/*</url-pattern>
</servlet-mapping>
<servlet>
<description>
WHOIS Protocol API.
</description>
<display-name>WHOIS Protocol</display-name>
<servlet-name>whois-protocol</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>whois-protocol</servlet-name>
<url-pattern>/_dr/whois</url-pattern>
</servlet-mapping>
<!-- RDAP (new WHOIS) -->
<servlet>
<description>
RDAP API.
</description>
<display-name>RDAP</display-name>
<servlet-name>rdap</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>rdap</servlet-name>
<url-pattern>/rdap/*</url-pattern>
</servlet-mapping>
<!-- Public API to do availability checks -->
<servlet>
<description>
Availability Check API.
</description>
<display-name>Availability Check</display-name>
<servlet-name>check</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.api.CheckApiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>check</servlet-name>
<url-pattern>/check</url-pattern>
</servlet-mapping>
<!-- Whitebox Metrics servlet. -->
<servlet>
<servlet-name>metrics</servlet-name>
<servlet-class>com.google.domain.registry.monitoring.whitebox.MetricsTaskServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>metrics</servlet-name>
<url-pattern>/_dr/task/metrics</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Any request path starting with `/_dr/` will be restricted to requests originating
from the backend or by anyone authenticated to a Google account that's listed in
the AppEngine control panel settings for this project as a Viewer/Owner/Developer.
The `_dr` is short for Domain Registry to follow AppEngine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-xhr</web-resource-name>
<description>
Only allow logged-in users to even try to issue EPP commands. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-xhr</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-settings</web-resource-name>
<description>
Only allow logged-in users to even try to change registrar settings. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-settings</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-payment</web-resource-name>
<description>
Only allow logged-in users to even try to make payments. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-payment</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-payment-token</web-resource-name>
<description>
Only allow logged-in users to even try to make payments. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-payment-token</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- This directory contains all the JavaScript sources verbatim. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>internal-sources</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<!-- TODO(b/26776367): Move these files to /assets/sources. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>admin-bin-js-map</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/admin_bin.js.map</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>admin-dbg-js</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/admin_dbg.js</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-bin-js-map</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/registrar_bin.js.map</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-dbg-js</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/registrar_dbg.js</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>brain-bin-js-map</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/brain_bin.js.map</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>admin-dbg-css</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/css/admin_dbg.css</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-dbg-css</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/css/registrar_dbg.css</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -0,0 +1,7 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
exports_files(glob(["WEB-INF/*"]))

View file

@ -0,0 +1,13 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -0,0 +1,284 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<servlet>
<display-name>Verify OTE</display-name>
<servlet-name>verify-ote</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.admin.VerifyOteServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>verify-ote</servlet-name>
<url-pattern>/_dr/admin/verifyOte</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Create Groups for Registrar</display-name>
<servlet-name>create-groups</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>create-groups</servlet-name>
<url-pattern>/_dr/admin/createGroups</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Create Premium List</display-name>
<servlet-name>create-premium-list</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>create-premium-list</servlet-name>
<url-pattern>/_dr/admin/createPremiumList</url-pattern>
</servlet-mapping>
<servlet>
<display-name>List Entities</display-name>
<servlet-name>list-entities</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>list-entities</servlet-name>
<url-pattern>/_dr/admin/list/*</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Delete Entity</display-name>
<servlet-name>delete-entity</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>delete-entity</servlet-name>
<url-pattern>/_dr/admin/deleteEntity</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Update Premium List</display-name>
<servlet-name>update-premium-list</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>update-premium-list</servlet-name>
<url-pattern>/_dr/admin/updatePremiumList</url-pattern>
</servlet-mapping>
<servlet>
    <description>Backend endpoint to initiate load tests.</description>
<display-name>Load Test</display-name>
<servlet-name>loadtest</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>loadtest</servlet-name>
<url-pattern>/_dr/loadtest</url-pattern>
</servlet-mapping>
<!-- Command line tool uses this endpoint to modify the datastore. -->
<servlet>
<display-name>Remote API Servlet</display-name>
<servlet-name>RemoteApiServlet</servlet-name>
<servlet-class>com.google.apphosting.utils.remoteapi.RemoteApiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>RemoteApiServlet</servlet-name>
<url-pattern>/remote_api</url-pattern>
</servlet-mapping>
<!-- ExecuteEppCommand uses this to execute remotely. -->
<servlet>
<description>
Execute epp from the registry tool.
</description>
<display-name>Registry tool EPP endpoint</display-name>
<servlet-name>epptool</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppToolServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>epptool</servlet-name>
<url-pattern>/_dr/epptool</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all prober data. -->
<servlet>
<description>
Deletes all billing events, history entries, domains, poll messages, foreign key indexes, and eCatcher entities in prober namespaces.
</description>
<display-name>Delete Prober Data Mapreduce</display-name>
<servlet-name>delete-prober-data</servlet-name>
<!-- TODO(b/27309488): maybe move this to the backend module + BackendServlet. -->
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>delete-prober-data</servlet-name>
<url-pattern>/_dr/task/deleteProberData</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all entities in non-default namespaces. -->
<servlet>
<description>
Deletes entities in non-default namespaces.
</description>
<display-name>Annihilate Non-Default Namespaces Mapreduce</display-name>
<servlet-name>annihilate-non-default-namespaces</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>annihilate-non-default-namespaces</servlet-name>
<url-pattern>/_dr/task/annihilateNonDefaultNamespaces</url-pattern>
</servlet-mapping>
<!-- Mapreduce to re-save all EppResources. -->
<servlet>
<description>
Re-saves all EppResources.
</description>
<display-name>Re-Save All EPP Resources</display-name>
<servlet-name>resave-all-eppresources</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>resave-all-eppresources</servlet-name>
<url-pattern>/_dr/task/resaveAllEppResources</url-pattern>
</servlet-mapping>
<!-- This path serves up the App Engine results page for mapreduce runs. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>
<servlet-class>com.google.appengine.tools.pipeline.impl.servlets.PipelineServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>pipeline</servlet-name>
<url-pattern>/_ah/pipeline/*</url-pattern>
</servlet-mapping>
<!-- Registrar detail report publishing action. -->
<servlet>
<servlet-name>publish-detail-report</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>publish-detail-report</servlet-name>
<url-pattern>/_dr/publishDetailReport</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>generate-zone-files</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>generate-zone-files</servlet-name>
<url-pattern>/_dr/task/generateZoneFiles</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
        Any request path starting with `/_dr/` will be restricted to requests originating
        from the backend or from anyone authenticated to a Google account that's listed in
        the App Engine control panel settings for this project as a Viewer/Owner/Developer.
        The `_dr` prefix is short for Domain Registry and follows App Engine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Shut down external access to registrar console. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Go home rogue registrar!
</description>
<url-pattern>/registrar*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- These are only included in the default module war, but restricting here too for safety. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -0,0 +1,4 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,5 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>backend</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -0,0 +1,5 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>default</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
<include path="/assets/sources/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -0,0 +1,193 @@
<?xml version="1.0" encoding="UTF-8"?>
<cronentries>
<!--
/cron/fanout params:
queue=<QUEUE_NAME>
endpoint=<ENDPOINT_NAME> // URL Path of servlet, which may contain placeholders:
// :tld - Replaced with the TLD, e.g. foo, soy
// :registrar - Replaced with registrar clientId
runInEmpty // Run in the empty namespace
forEachRealTld // Run for tlds with getTldType() == TldType.REAL
forEachTestTld // Run for tlds with getTldType() == TldType.TEST
exclude=TLD1[&exclude=TLD2] // exclude something otherwise included
-->
<cron>
<url>/_dr/task/rdeStaging</url>
<description>
This job generates a full RDE escrow deposit as a single gigantic XML document
and streams it to cloud storage. When this job has finished successfully, it'll
launch a separate task that uploads the deposit file to Iron Mountain via SFTP.
</description>
<!--
This only needs to run once per day, but we launch additional jobs in case the
cursor is lagging behind, so it'll catch up to the current date as quickly as
possible. The only job that'll run under normal circumstances is the one that's
close to midnight, since if the cursor is up-to-date, the task is a no-op.
    We want it to be close to midnight because that reduces the chance that the
    point-in-time code will have to go to the extra trouble of fetching old
versions of objects from the datastore. However, we don't want it to run too
close to midnight, because there's always a chance that a change which was
timestamped before midnight hasn't fully been committed to the datastore. So
we add a 4+ minute grace period to ensure the transactions cool down, since
our queries are not transactional.
-->
<schedule>every 4 hours from 00:07 to 20:00</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=rde-upload&endpoint=/_dr/task/rdeUpload&forEachRealTld]]></url>
<description>
This job is a no-op unless RdeUploadCursor falls behind for some reason.
</description>
<schedule>every 4 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=rde-report&endpoint=/_dr/task/rdeReport&forEachRealTld]]></url>
<description>
This job is a no-op unless RdeReportCursor falls behind for some reason.
</description>
<schedule>every 4 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchDnl&runInEmpty]]></url>
<description>
This job downloads the latest DNL from MarksDB and inserts it into the database.
(See: TmchDnlServlet, ClaimsList)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchSmdrl&runInEmpty]]></url>
<description>
This job downloads the latest SMDRL from MarksDB and inserts it into the database.
(See: TmchSmdrlServlet, SignedMarkRevocationList)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchCrl&runInEmpty]]></url>
<description>
This job downloads the latest CRL from MarksDB and inserts it into the database.
(See: TmchCrlServlet)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=group-members-sync&endpoint=/_dr/task/syncGroupMembers&runInEmpty]]></url>
<description>
Syncs RegistrarContact changes in the past hour to Google Groups.
</description>
<schedule>every 1 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=sheet&endpoint=/_dr/task/syncRegistrarsSheet&runInEmpty]]></url>
<description>
Synchronize Registrar entities to Google Spreadsheets.
</description>
<schedule>every 1 hours synchronized</schedule>
<target>backend</target>
</cron>
<!-- TODO(b/23319222): Re-enable when fixed.
<cron>
<url><![CDATA[/_dr/cron/commitLogFanout?queue=delete-commits&endpoint=/_dr/task/deleteOldCommitLogs&jitterSeconds=600]]></url>
<description>
This job deletes commit logs from datastore that are old, e.g. thirty days.
</description>
<schedule>every 20 minutes synchronized</schedule>
<target>backend</target>
</cron>
-->
<cron>
<url><![CDATA[/_dr/cron/commitLogCheckpoint]]></url>
<description>
This job checkpoints the commit log buckets and exports the diff since last checkpoint to GCS.
</description>
<schedule>every 1 minutes synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
<description>
This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
It also enqueues a new task to wait on the completion of that job and then load the resulting
snapshot into bigquery.
</description>
<!-- Keep the task-age-limit for this job's task queue less than this cron interval. -->
<schedule>every day 06:00</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=nordn&endpoint=/_dr/task/nordnUpload&forEachRealTld&lordn-phase=sunrise]]></url>
<description>
      This job uploads LORDN Sunrise CSV files for each TLD to MarksDB. It should be
      run no more often than every three hours, and at an absolute minimum once every 26 hours.
</description>
<!-- This may be set anywhere between "every 3 hours" and "every 25 hours". -->
<schedule>every 12 hours synchronized</schedule>
<timezone>UTC</timezone>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=nordn&endpoint=/_dr/task/nordnUpload&forEachRealTld&lordn-phase=claims]]></url>
<description>
      This job uploads LORDN Claims CSV files for each TLD to MarksDB. It should be
      run no more often than every three hours, and at an absolute minimum once every 26 hours.
</description>
<!-- This may be set anywhere between "every 3 hours" and "every 25 hours". -->
<schedule>every 12 hours synchronized</schedule>
<timezone>UTC</timezone>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/task/deleteProberData]]></url>
<description>
This job clears out data from probers and runs once a week.
</description>
<schedule>every monday 14:00</schedule>
<timezone>UTC</timezone>
<!-- TODO(b/27309488): maybe move this to the backend module. -->
<target>tools</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=export-reserved-terms&endpoint=/_dr/task/exportReservedTerms&forEachRealTld]]></url>
<description>
      Exports the reserved terms list for each real TLD to Google Drive once per day.
</description>
<schedule>every day 05:30</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=dns-cron&endpoint=/_dr/task/writeDns&forEachRealTld&forEachTestTld&jitterSeconds=60]]></url>
<description>
Write updates to the DNS system. Lease tasks from the dns-pull queue until empty.
</description>
<schedule>every 1 minutes synchronized</schedule>
<target>backend</target>
</cron>
</cronentries>

View file

@ -0,0 +1,5 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>tools</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -0,0 +1,44 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "export",
srcs = glob(["*.java"]),
deps = [
"//apiserving/discoverydata/bigquery:bigqueryv2",
"//apiserving/discoverydata/drive",
"//java/com/google/api/client/extensions/appengine/http",
"//java/com/google/api/client/googleapis/extensions/appengine/auth/oauth2",
"//java/com/google/api/client/googleapis/json",
"//java/com/google/api/client/http",
"//java/com/google/api/client/json",
"//java/com/google/api/client/json/jackson2",
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/html",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/domain/registry/bigquery",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/flows",
"//java/com/google/domain/registry/gcs",
"//java/com/google/domain/registry/groups",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/security:servlets",
"//java/com/google/domain/registry/storage/drive",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/appengine_gcs_client",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/json_simple",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -0,0 +1,173 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.domain.registry.bigquery.BigqueryUtils.toJobReferenceString;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobReference;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Header;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.NotModifiedException;
import com.google.domain.registry.request.Payload;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import dagger.Lazy;
import org.joda.time.Duration;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import javax.inject.Inject;
/**
 * An action which polls the state of a BigQuery job. If the job has completed, it logs the
 * completion state; otherwise it returns a failure code so that the task will be retried.
*/
@Action(
path = BigqueryPollJobAction.PATH,
method = {Action.Method.GET, Action.Method.POST},
automaticallyPrintOk = true)
public class BigqueryPollJobAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final String QUEUE = "export-bigquery-poll"; // See queue.xml
static final String PATH = "/_dr/task/pollBigqueryJob"; // See web.xml
static final String CHAINED_TASK_QUEUE_HEADER = "X-DomainRegistry-ChainedTaskQueue";
static final String PROJECT_ID_HEADER = "X-DomainRegistry-ProjectId";
static final String JOB_ID_HEADER = "X-DomainRegistry-JobId";
static final Duration POLL_COUNTDOWN = Duration.standardSeconds(20);
@Inject Bigquery bigquery;
@Inject TaskEnqueuer enqueuer;
@Inject @Header(CHAINED_TASK_QUEUE_HEADER) Lazy<String> chainedQueueName;
@Inject @Header(PROJECT_ID_HEADER) String projectId;
@Inject @Header(JOB_ID_HEADER) String jobId;
@Inject @Payload byte[] payload;
@Inject BigqueryPollJobAction() {}
@Override
public void run() {
    // Throws a NotModifiedException if the job hasn't completed yet.
    boolean jobSucceeded = checkJobOutcome();
    // Don't enqueue the chained task if the job failed or if there's no payload to chain.
    if (!jobSucceeded || payload == null || payload.length == 0) {
return;
}
// If there is a payload, it's a chained task, so enqueue it.
TaskOptions task;
try {
task = (TaskOptions) new ObjectInputStream(new ByteArrayInputStream(payload)).readObject();
} catch (ClassNotFoundException | IOException e) {
logger.severe(e, e.toString());
throw new BadRequestException("Cannot deserialize task from payload", e);
}
String taskName = enqueuer.enqueue(getQueue(chainedQueueName.get()), task).getName();
logger.infofmt(
"Added chained task %s for %s to queue %s: %s",
taskName,
task.getUrl(),
chainedQueueName.get(),
task.toString());
}
/**
* Returns true if the provided job succeeded, false if it failed, and throws an exception if it
* is still pending.
*/
private boolean checkJobOutcome() {
Job job = null;
String jobRefString =
toJobReferenceString(new JobReference().setProjectId(projectId).setJobId(jobId));
try {
job = bigquery.jobs().get(projectId, jobId).execute();
} catch (IOException e) {
      // We'll throw a NotModifiedException below since the job isn't confirmed done, but first
      // log this exception.
logger.warning(e, e.getMessage());
}
// If job is not yet done, then throw an exception so that we'll return a failing HTTP status
// code and the task will be retried.
if (job == null || !job.getStatus().getState().equals("DONE")) {
throw new NotModifiedException(jobRefString);
}
// Check if the job ended with an error.
if (job.getStatus().getErrorResult() != null) {
logger.severefmt("Bigquery job failed - %s - %s", jobRefString, job);
return false;
}
logger.infofmt("Bigquery job succeeded - %s", jobRefString);
return true;
}
/** Helper class to enqueue a bigquery poll job. */
public static class BigqueryPollJobEnqueuer {
private final TaskEnqueuer enqueuer;
@Inject
BigqueryPollJobEnqueuer(TaskEnqueuer enqueuer) {
this.enqueuer = enqueuer;
}
/** Enqueue a task to poll for the success or failure of the referenced BigQuery job. */
public TaskHandle enqueuePollTask(JobReference jobRef) {
return enqueuer.enqueue(getQueue(QUEUE), createCommonPollTask(jobRef).method(Method.GET));
}
/**
* Enqueue a task to poll for the success or failure of the referenced BigQuery job and to
* launch the provided task in the specified queue if the job succeeds.
*/
public TaskHandle enqueuePollTask(
JobReference jobRef, TaskOptions chainedTask, Queue chainedTaskQueue) throws IOException {
// Serialize the chainedTask into a byte array to put in the task payload.
ByteArrayOutputStream taskBytes = new ByteArrayOutputStream();
new ObjectOutputStream(taskBytes).writeObject(chainedTask);
return enqueuer.enqueue(
getQueue(QUEUE),
createCommonPollTask(jobRef)
.method(Method.POST)
.header(CHAINED_TASK_QUEUE_HEADER, chainedTaskQueue.getQueueName())
.payload(taskBytes.toByteArray()));
}
    /** Creates the shared TaskOptions common to every poll task for the given BigQuery job. */
private static TaskOptions createCommonPollTask(JobReference jobRef) {
// Omit host header so that task will be run on the current backend/module.
return withUrl(PATH)
.countdownMillis(POLL_COUNTDOWN.getMillis())
.header(PROJECT_ID_HEADER, jobRef.getProjectId())
.header(JOB_ID_HEADER, jobRef.getJobId());
}
}
}
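
For illustration, here is a minimal sketch of how a caller might use BigqueryPollJobEnqueuer to chain a follow-up task onto a BigQuery job poll. The project ID, job ID, queue name, and follow-up URL below are hypothetical stand-ins, not values from this codebase.

import com.google.api.services.bigquery.model.JobReference;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import java.io.IOException;

class PollJobExample {
  // Hypothetical usage; "my-project", "job-123", and "/_dr/task/followUp" are made up.
  static void chainFollowUpTask(
      BigqueryPollJobAction.BigqueryPollJobEnqueuer enqueuer) throws IOException {
    JobReference jobRef = new JobReference().setProjectId("my-project").setJobId("job-123");
    // The chained task is serialized into the poll task's payload and is only enqueued
    // onto its own queue once the poll task observes the job complete successfully.
    TaskOptions followUp = TaskOptions.Builder.withUrl("/_dr/task/followUp").method(Method.POST);
    enqueuer.enqueuePollTask(jobRef, followUp, QueueFactory.getQueue("default"));
  }
}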

View file

@ -0,0 +1,148 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.CaseFormat.LOWER_UNDERSCORE;
import static com.google.common.base.CaseFormat.UPPER_UNDERSCORE;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.common.base.Function;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.domain.registry.bigquery.BigqueryUtils.FieldMode;
import com.google.domain.registry.bigquery.BigqueryUtils.FieldType;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
/** Helper class which acts as a container for Bigquery table schemas. */
class BigquerySchemas {
private BigquerySchemas() {}
/** The fields in the "Logs" table. */
enum LogsTableField {
// These fields appear in nested APP_LOG_LINES records.
LOG_LEVEL(FieldType.STRING, FieldMode.NULLABLE),
LOG_MESSAGE(FieldType.STRING, FieldMode.NULLABLE),
TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),
// These fields appear in records at top level of the table.
START_TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),
END_TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),
RELEASE(FieldType.STRING, FieldMode.NULLABLE),
APPID(FieldType.STRING, FieldMode.NULLABLE),
COST(FieldType.FLOAT, FieldMode.NULLABLE),
EPP_CLIENT_ID(FieldType.STRING, FieldMode.NULLABLE),
EPP_COMMAND(FieldType.STRING, FieldMode.NULLABLE),
EPP_RESULT(FieldType.BOOLEAN, FieldMode.NULLABLE),
EPP_TARGET(FieldType.STRING, FieldMode.REPEATED),
EPP_TLD(FieldType.STRING, FieldMode.NULLABLE),
HOST(FieldType.STRING, FieldMode.NULLABLE),
HTTP_VERSION(FieldType.STRING, FieldMode.NULLABLE),
INSTANCE_KEY(FieldType.STRING, FieldMode.NULLABLE),
IP(FieldType.STRING, FieldMode.NULLABLE),
LATENCY_USEC(FieldType.INTEGER, FieldMode.NULLABLE),
MCYCLES(FieldType.INTEGER, FieldMode.NULLABLE),
METHOD(FieldType.STRING, FieldMode.NULLABLE),
MODULE_ID(FieldType.STRING, FieldMode.NULLABLE),
NICKNAME(FieldType.STRING, FieldMode.NULLABLE),
OFFSET(FieldType.STRING, FieldMode.NULLABLE),
PENDING_TIME_USEC(FieldType.INTEGER, FieldMode.NULLABLE),
REFERRER(FieldType.STRING, FieldMode.NULLABLE),
REPLICA_INDEX(FieldType.INTEGER, FieldMode.NULLABLE),
REQUEST_ID(FieldType.STRING, FieldMode.NULLABLE),
RESOURCE(FieldType.STRING, FieldMode.NULLABLE),
RESPONSE_SIZE(FieldType.INTEGER, FieldMode.NULLABLE),
STATUS(FieldType.INTEGER, FieldMode.NULLABLE),
TASK_NAME(FieldType.STRING, FieldMode.NULLABLE),
TASK_QUEUE_NAME(FieldType.STRING, FieldMode.NULLABLE),
URL_MAP_ENTRY(FieldType.STRING, FieldMode.NULLABLE),
USER_AGENT(FieldType.STRING, FieldMode.NULLABLE),
VERSION_ID(FieldType.STRING, FieldMode.NULLABLE),
APP_LOG_LINES(FieldType.RECORD, FieldMode.REPEATED,
ImmutableList.of(LOG_LEVEL, LOG_MESSAGE, TIME));
private final FieldType fieldType;
private final FieldMode fieldMode;
private final ImmutableList<LogsTableField> childFields;
LogsTableField(FieldType fieldType, FieldMode fieldMode) {
this(fieldType, fieldMode, ImmutableList.<LogsTableField>of());
}
LogsTableField(
FieldType fieldType, FieldMode fieldMode, ImmutableList<LogsTableField> childFields) {
this.fieldType = checkNotNull(fieldType);
this.fieldMode = checkNotNull(fieldMode);
this.childFields = checkNotNull(childFields);
}
/** Return the name of the field as it should appear in the Bigquery schema. */
String schemaName() {
return UPPER_UNDERSCORE.to(LOWER_UNDERSCORE, name());
}
/** Return the {@link TableFieldSchema} of this field for use in a Bigquery table. */
private TableFieldSchema getTableFieldSchema() {
TableFieldSchema tableFieldSchema = new TableFieldSchema()
.setName(schemaName())
.setType(fieldType.schemaName())
.setMode(fieldMode.schemaName());
if (!childFields.isEmpty()) {
tableFieldSchema.setFields(getSchema(childFields));
}
return tableFieldSchema;
}
/**
* Return the schema of a list of {@link TableFieldSchema} objects for use in a Bigquery table.
*/
private static List<TableFieldSchema> getSchema(Iterable<LogsTableField> fields) {
return FluentIterable.from(fields)
.transform(new Function<LogsTableField, TableFieldSchema>() {
@Override
public TableFieldSchema apply(LogsTableField field) {
return field.getTableFieldSchema();
}})
.toList();
}
/**
* Return the schema of this table for use in a Bigquery table.
*/
static List<TableFieldSchema> getTableSchema() {
List<LogsTableField> allFields = Arrays.asList(LogsTableField.values());
// Collect the list of all child fields so we can exclude them from the list of fields at the
// top level of the schema.
Set<LogsTableField> childFields = FluentIterable.from(allFields)
.transformAndConcat(new Function<LogsTableField, List<LogsTableField>>() {
@Override
public List<LogsTableField> apply(LogsTableField field) {
return field.childFields;
}})
.toSet();
Set<LogsTableField> topLevelFields =
Sets.difference(ImmutableSet.copyOf(allFields), childFields);
return getSchema(topLevelFields);
}
}
}
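
As a rough sketch of how this schema could be consumed, the snippet below builds a BigQuery Table definition from the enum's field list; the project, dataset, and table IDs are hypothetical, and same-package access to the package-private enum is assumed.

import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;

class LogsTableSchemaExample {
  // Hypothetical IDs for illustration only.
  static Table makeLogsTableDefinition() {
    return new Table()
        .setTableReference(new TableReference()
            .setProjectId("my-project")
            .setDatasetId("logs_dataset")
            .setTableId("logs"))
        // getTableSchema() returns only top-level fields; the APP_LOG_LINES
        // children are nested under that RECORD field rather than repeated here.
        .setSchema(new TableSchema()
            .setFields(BigquerySchemas.LogsTableField.getTableSchema()));
  }
}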

View file

@ -0,0 +1,170 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.html.HtmlEscapers.htmlEscaper;
import static com.google.domain.registry.util.HttpServletUtils.getRequiredParameterValue;
import static javax.servlet.http.HttpServletResponse.SC_ACCEPTED;
import static javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_NOT_MODIFIED;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.net.MediaType;
import com.google.domain.registry.export.DatastoreBackupInfo.BackupStatus;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import org.joda.time.Duration;
import org.joda.time.PeriodType;
import org.joda.time.format.PeriodFormat;
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/** Check the status of a snapshot, and if complete, trigger loading it into BigQuery. */
public class CheckSnapshotServlet extends HttpServlet {
/** Parameter names for passing parameters into this servlet. */
static final String SNAPSHOT_NAME_PARAM = "name";
static final String SNAPSHOT_KINDS_TO_LOAD_PARAM = "kindsToLoad";
/** Servlet-specific details needed for enqueuing tasks against itself. */
static final String QUEUE = "export-snapshot-poll"; // See queue.xml.
static final String PATH = "/_dr/task/checkSnapshot"; // See web.xml.
static final Duration POLL_COUNTDOWN = Duration.standardMinutes(2);
/** The maximum amount of time we allow a backup to run before abandoning it. */
static final Duration MAXIMUM_BACKUP_RUNNING_TIME = Duration.standardHours(20);
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@NonFinalForTesting
private static DatastoreBackupService backupService = DatastoreBackupService.get();
@NonFinalForTesting
private static LoadSnapshotServlet loadSnapshotServlet = new LoadSnapshotServlet();
@Override
public void service(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
try {
rsp.setStatus(SC_OK);
rsp.setContentType(MediaType.PLAIN_TEXT_UTF_8.toString());
rsp.getWriter().write("OK\n\n");
super.service(req, rsp);
} catch (Throwable e) {
logger.severe(e, e.toString());
rsp.sendError(
e instanceof IllegalArgumentException ? SC_BAD_REQUEST : SC_INTERNAL_SERVER_ERROR,
htmlEscaper().escape(firstNonNull(e.getMessage(), e.toString())));
}
}
@Override
public void doGet(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
String snapshotName = getRequiredParameterValue(req, SNAPSHOT_NAME_PARAM);
rsp.getWriter().write(backupService.findByName(snapshotName).getInformation());
}
@Override
public void doPost(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
String snapshotName = getRequiredParameterValue(req, SNAPSHOT_NAME_PARAM);
// TODO(b/19237926): make this non-optional once all new tasks will have this parameter.
String kindsToLoadParam = req.getParameter(SNAPSHOT_KINDS_TO_LOAD_PARAM);
Optional<ImmutableSet<String>> kindsToLoad = Optional.fromNullable(
kindsToLoadParam == null ? null
: ImmutableSet.copyOf(Splitter.on(',').split(kindsToLoadParam)));
// Look up the backup by the provided name, stopping if we can't find it.
DatastoreBackupInfo backup;
try {
backup = backupService.findByName(snapshotName);
} catch (IllegalArgumentException e) {
String message = String.format("Bad backup name %s: %s", snapshotName, e.getMessage());
logger.severe(e, message);
// TODO(b/19081569): Ideally this would return a 2XX error so the task would not be retried,
// but we might abandon backups that start late and haven't yet written to datastore.
// We could fix that by replacing this with a two-phase polling strategy.
rsp.sendError(SC_BAD_REQUEST, htmlEscaper().escape(message));
return;
}
// Stop now if the backup is not complete.
if (!backup.getStatus().equals(BackupStatus.COMPLETE)) {
Duration runningTime = backup.getRunningTime();
if (runningTime.isShorterThan(MAXIMUM_BACKUP_RUNNING_TIME)) {
// Backup might still be running, so send a 304 to have the task retry.
rsp.sendError(SC_NOT_MODIFIED,
htmlEscaper().escape(String.format("Datastore backup %s still pending", snapshotName)));
} else {
// Declare the backup a lost cause, and send 202 Accepted so the task will not be retried.
String message = String.format("Datastore backup %s abandoned - not complete after %s",
snapshotName,
PeriodFormat.getDefault().print(
runningTime.toPeriod().normalizedStandard(
PeriodType.dayTime().withMillisRemoved())));
logger.severe(message);
rsp.sendError(SC_ACCEPTED, htmlEscaper().escape(message));
}
return;
}
// Get a compact string to identify this snapshot in BigQuery by trying to parse the unique
// suffix out of the snapshot name and falling back to the start time as a string.
String snapshotId = snapshotName.startsWith(ExportSnapshotServlet.SNAPSHOT_PREFIX)
? snapshotName.substring(ExportSnapshotServlet.SNAPSHOT_PREFIX.length())
: backup.getStartTime().toString("YYYYMMdd_HHmmss");
// Log a warning if kindsToLoad is specified and not a subset of the exported snapshot kinds.
if (kindsToLoad.isPresent() && !backup.getKinds().containsAll(kindsToLoad.get())) {
logger.warningfmt("Kinds to load included non-exported kinds: %s",
Sets.difference(kindsToLoad.get(), backup.getKinds()));
}
// Load kinds from the snapshot, limited to those also in kindsToLoad (if it's present).
ImmutableSet<String> exportedKindsToLoad = ImmutableSet.copyOf(kindsToLoad.isPresent()
? Sets.intersection(backup.getKinds(), kindsToLoad.get())
: backup.getKinds());
String message = String.format("Datastore backup %s complete - ", snapshotName);
if (exportedKindsToLoad.isEmpty()) {
message += "no kinds to load into BigQuery";
} else {
loadSnapshotServlet.enqueueLoadTask(
snapshotId, backup.getGcsFilename().get(), exportedKindsToLoad);
message += "BigQuery load task enqueued";
}
logger.info(message);
rsp.getWriter().write(message);
}
/** Enqueue a poll task to monitor the named snapshot for completion. */
TaskHandle enqueuePollTask(String snapshotName, ImmutableSet<String> kindsToLoad) {
return QueueFactory.getQueue(QUEUE).add(
TaskOptions.Builder.withUrl(PATH)
.method(Method.POST)
.countdownMillis(POLL_COUNTDOWN.getMillis())
.param(SNAPSHOT_NAME_PARAM, snapshotName)
.param(SNAPSHOT_KINDS_TO_LOAD_PARAM, Joiner.on(',').join(kindsToLoad)));
}
}
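
A minimal usage sketch, assuming a caller that has just kicked off a datastore export: it enqueues the poll task above, which then either returns 304 while the backup is pending, 202 once it is abandoned, or enqueues the BigQuery load on completion. The snapshot name and kinds are hypothetical, and same-package access is assumed.

import com.google.common.collect.ImmutableSet;

class CheckSnapshotExample {
  // Hypothetical snapshot name and entity kinds.
  static void monitorSnapshot(CheckSnapshotServlet servlet) {
    servlet.enqueuePollTask(
        "auto_snapshot_20160301_120000", ImmutableSet.of("Registrar", "DomainBase"));
  }
}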

View file

@ -0,0 +1,150 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.joda.time.DateTimeZone.UTC;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.Text;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.domain.registry.util.SystemClock;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.util.Date;
import java.util.List;
/** Container for information about a datastore backup. */
public class DatastoreBackupInfo {
@NonFinalForTesting
private static Clock clock = new SystemClock();
/** The possible status values for a datastore backup. */
public enum BackupStatus { PENDING, COMPLETE }
/** The name of the datastore backup. */
private final String backupName;
/** The entity kinds included in this datastore backup. */
private final ImmutableSet<String> kinds;
/** The start time of the datastore backup. */
private final DateTime startTime;
/** The completion time of the datastore backup, present if it has completed. */
private final Optional<DateTime> completeTime;
/**
* The GCS filename to which the backup's top-level .backup_info manifest file has been written,
* present if the backup has completed.
*/
private final Optional<String> gcsFilename;
/** DatastoreBackupInfo instances should only be obtained via DatastoreBackupService. */
DatastoreBackupInfo(Entity backupEntity) {
backupName = (String) checkNotNull(backupEntity.getProperty("name"), "name");
@SuppressWarnings("unchecked")
List<String> rawKinds = (List<String>) checkNotNull(backupEntity.getProperty("kinds"), "kinds");
Date rawStartTime = (Date) checkNotNull(backupEntity.getProperty("start_time"), "start_time");
Date rawCompleteTime = (Date) backupEntity.getProperty("complete_time");
Text rawGcsFilename = (Text) backupEntity.getProperty("gs_handle");
kinds = ImmutableSet.copyOf(rawKinds);
startTime = new DateTime(rawStartTime).withZone(UTC);
completeTime = Optional.fromNullable(
rawCompleteTime == null ? null : new DateTime(rawCompleteTime).withZone(UTC));
gcsFilename = Optional.fromNullable(
rawGcsFilename == null ? null : gcsPathToUri(rawGcsFilename.getValue()));
}
/** This constructor is only exposed for test purposes. */
@VisibleForTesting
DatastoreBackupInfo(
String backupName,
DateTime startTime,
Optional<DateTime> completeTime,
ImmutableSet<String> kinds,
Optional<String> gcsFilename) {
this.backupName = backupName;
this.startTime = startTime;
this.completeTime = completeTime;
this.kinds = kinds;
this.gcsFilename = gcsFilename;
}
/**
* Rewrite a GCS path as stored by Datastore Admin (with a "/gs/" prefix) to the more standard
* URI format that uses a "gs://" scheme prefix.
*/
private static String gcsPathToUri(String backupGcsPath) {
checkArgument(backupGcsPath.startsWith("/gs/"), "GCS path not in expected format");
return backupGcsPath.replaceFirst("/gs/", "gs://");
}
public String getName() {
return backupName;
}
public ImmutableSet<String> getKinds() {
return kinds;
}
public BackupStatus getStatus() {
return completeTime.isPresent() ? BackupStatus.COMPLETE : BackupStatus.PENDING;
}
public DateTime getStartTime() {
return startTime;
}
public Optional<DateTime> getCompleteTime() {
return completeTime;
}
/**
* Returns the length of time the backup ran for (if completed) or the length of time since the
* backup started (if it has not completed).
*/
public Duration getRunningTime() {
return new Duration(startTime, completeTime.or(clock.nowUtc()));
}
public Optional<String> getGcsFilename() {
return gcsFilename;
}
/** Returns a string version of key information about the backup. */
public String getInformation() {
return Joiner.on('\n')
.join(
"Backup name: " + backupName,
"Status: " + getStatus(),
"Started: " + startTime,
"Ended: " + completeTime.orNull(),
"Duration: " + getRunningTime().toPeriod().toString().substring(2).toLowerCase(),
"GCS: " + gcsFilename.orNull(),
"Kinds: " + kinds,
"");
}
}
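
A small sketch using the test-only constructor above to show how status is derived: a backup without a complete time reports PENDING, and getRunningTime() measures from the start time to the current clock. All values below are made up.

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import org.joda.time.DateTime;

class BackupInfoExample {
  // Assumes same-package access to the @VisibleForTesting constructor.
  static void printPendingBackup() {
    DatastoreBackupInfo pending = new DatastoreBackupInfo(
        "auto_snapshot_test",            // backup name (made up)
        DateTime.now(),                  // start time
        Optional.<DateTime>absent(),     // no complete time yet => PENDING
        ImmutableSet.of("Registrar"),    // kinds (made up)
        Optional.<String>absent());      // no GCS manifest yet
    System.out.println(pending.getInformation());
  }
}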

View file

@ -0,0 +1,123 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.api.datastore.DatastoreServiceFactory.getDatastoreService;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.Strings.nullToEmpty;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.Query;
import com.google.appengine.api.modules.ModulesService;
import com.google.appengine.api.modules.ModulesServiceFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.domain.registry.util.NonFinalForTesting;
import java.util.NoSuchElementException;
/** An object providing methods for starting and querying datastore backups. */
public class DatastoreBackupService {
/** The internal kind name used for entities storing information about datastore backups. */
static final String BACKUP_INFO_KIND = "_AE_Backup_Information";
/** The name of the app version used for hosting the Datastore Admin functionality. */
static final String DATASTORE_ADMIN_VERSION_NAME = "ah-builtin-python-bundle";
@NonFinalForTesting
private static ModulesService modulesService = ModulesServiceFactory.getModulesService();
/**
* Returns an instance of this service.
*
* <p>This method exists to allow for making the service a singleton object if desired at some
* future point; the choice is meaningless right now because the service maintains no state.
* That means its client-facing methods could in theory be static methods, but they are not
* because that makes it difficult to mock this service in clients.
*/
public static DatastoreBackupService get() {
return new DatastoreBackupService();
}
/**
* Generates the TaskOptions needed to trigger an AppEngine datastore backup job.
*
* @see "https://developers.google.com/appengine/articles/scheduled_backups"
*/
private static TaskOptions makeTaskOptions(
String queue, String name, String gcsBucket, ImmutableSet<String> kinds) {
String hostname = modulesService.getVersionHostname("default", DATASTORE_ADMIN_VERSION_NAME);
TaskOptions options = TaskOptions.Builder.withUrl("/_ah/datastore_admin/backup.create")
.header("Host", hostname)
.method(Method.GET)
.param("name", name + "_") // Add underscore since the name will be used as a prefix.
.param("filesystem", "gs")
.param("gs_bucket_name", gcsBucket)
.param("queue", queue)
.param("run_as_a_service", String.valueOf(true));
for (String kind : kinds) {
options.param("kind", kind);
}
return options;
}
/**
* Launches a new datastore backup with the given name, GCS bucket, and set of kinds by
* submitting a task to the given task queue, and returns a handle to that task.
*/
public TaskHandle launchNewBackup(
String queue, String name, String gcsBucket, ImmutableSet<String> kinds) {
return getQueue(queue).add(makeTaskOptions(queue, name, gcsBucket, kinds));
}
/** Return an iterable of all datastore backups whose names have the given string prefix. */
public Iterable<DatastoreBackupInfo> findAllByNamePrefix(final String namePrefix) {
// Need the raw DatastoreService to access the internal _AE_Backup_Information entities.
// TODO(b/19081037): make an Objectify entity class for these raw datastore entities instead.
return FluentIterable
.from(getDatastoreService().prepare(new Query(BACKUP_INFO_KIND)).asIterable())
.filter(new Predicate<Entity>() {
@Override
public boolean apply(Entity entity) {
return nullToEmpty((String) entity.getProperty("name")).startsWith(namePrefix);
}})
.transform(new Function<Entity, DatastoreBackupInfo>() {
@Override
public DatastoreBackupInfo apply(Entity entity) {
return new DatastoreBackupInfo(entity);
}});
}
/**
* Return a single DatastoreBackup that uniquely matches this name prefix. Throws an IAE
* if no backups match or if more than one backup matches.
*/
public DatastoreBackupInfo findByName(final String namePrefix) {
try {
return Iterables.getOnlyElement(findAllByNamePrefix(namePrefix));
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("More than one backup with name prefix " + namePrefix, e);
} catch (NoSuchElementException e) {
throw new IllegalArgumentException("No backup found with name prefix " + namePrefix, e);
}
}
}
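
For context, a hedged sketch of end-to-end use: launch a backup via Datastore Admin and, once its _AE_Backup_Information entity exists, look it up by name prefix. The queue, backup name, and bucket below are hypothetical.

import com.google.common.collect.ImmutableSet;

class BackupServiceExample {
  static void launchAndInspect() {
    DatastoreBackupService backups = DatastoreBackupService.get();
    // Submits a GET task to /_ah/datastore_admin/backup.create; the trailing
    // underscore appended to the name makes it usable as a unique prefix later.
    backups.launchNewBackup(
        "export-snapshot", "auto_snapshot_20160301", "my-gcs-bucket",
        ImmutableSet.of("Registrar", "DomainBase"));
    // Later (once the backup entity actually exists), find it by the same prefix.
    System.out.println(backups.findByName("auto_snapshot_20160301").getInformation());
  }
}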

Some files were not shown because too many files have changed in this diff