mv com/google/domain/registry google/registry

This change renames directories in preparation for the great package
rename. The repository is now in a broken state because the code
itself hasn't been updated. However, this should ensure that git
correctly preserves history for each file.
Justine Tunney 2016-05-13 18:55:08 -04:00
parent a41677aea1
commit 5012893c1d
2396 changed files with 0 additions and 0 deletions

java/com/google/domain/registry/BUILD

@@ -1,11 +0,0 @@
package(default_visibility = ["//visibility:public"])
package_group(
name = "registry_project",
packages = [
"//java/com/google/domain/registry/...",
"//javatests/com/google/domain/registry/...",
],
)

java/com/google/domain/registry/backup/BUILD

@@ -1,31 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "backup",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//java/com/google/common/primitives",
"//java/com/google/common/util/concurrent",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/cron",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/appengine_gcs_client",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/json_simple",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

java/com/google/domain/registry/backup/BackupModule.java

@@ -1,92 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.ThreadManager.currentRequestThreadFactory;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.RestoreCommitLogsAction.FROM_TIME_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.cron.CommitLogFanoutAction;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import org.joda.time.DateTime;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
import javax.servlet.http.HttpServletRequest;
/**
* Dagger module for backup package.
*
* @see "com.google.domain.registry.module.backend.BackendComponent"
*/
@Module
public final class BackupModule {
/** Dagger qualifier for backups. */
@Qualifier
@Documented
public static @interface Backups {}
/** Number of threads in the threaded executor. */
private static final int NUM_THREADS = 10;
@Provides
@Parameter("bucket")
static int provideBucket(HttpServletRequest req) {
String param = extractRequiredParameter(req, CommitLogFanoutAction.BUCKET_PARAM);
Integer bucket = Ints.tryParse(param);
if (bucket == null) {
throw new BadRequestException("Bad bucket id");
}
return bucket;
}
@Provides
@Parameter(LOWER_CHECKPOINT_TIME_PARAM)
static DateTime provideLowerCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, LOWER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(UPPER_CHECKPOINT_TIME_PARAM)
static DateTime provideUpperCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, UPPER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(FROM_TIME_PARAM)
static DateTime provideFromTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, FROM_TIME_PARAM);
}
@Provides
@Backups
static ListeningExecutorService provideListeningExecutorService() {
return listeningDecorator(newFixedThreadPool(NUM_THREADS, currentRequestThreadFactory()));
}
}
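
The null check in provideBucket is worth a standalone illustration: Guava's Ints.tryParse signals malformed input by returning null instead of throwing, so the provider has to translate null into a BadRequestException. A minimal demo (class name hypothetical):

import com.google.common.primitives.Ints;

public class IntsTryParseDemo {
  public static void main(String[] args) {
    System.out.println(Ints.tryParse("7"));      // 7
    System.out.println(Ints.tryParse("7.0"));    // null (not a plain integer)
    System.out.println(Ints.tryParse("bucket")); // null (not numeric)
  }
}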

java/com/google/domain/registry/backup/BackupUtils.java

@@ -1,75 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.model.ImmutableObject;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
/** Utilities for working with backups. */
public class BackupUtils {
/** Keys for user metadata fields on commit log files in GCS. */
public static final class GcsMetadataKeys {
private GcsMetadataKeys() {}
public static final String NUM_TRANSACTIONS = "num_transactions";
public static final String LOWER_BOUND_CHECKPOINT = "lower_bound_checkpoint";
public static final String UPPER_BOUND_CHECKPOINT = "upper_bound_checkpoint";
}
/**
* Converts the given {@link ImmutableObject} to a raw datastore entity and writes it to an
* {@link OutputStream} in delimited protocol buffer format.
*/
static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
EntityTranslator.convertToPb(ofy().save().toEntity(entity)).writeDelimitedTo(stream);
}
/**
* Returns an iterator of {@link ImmutableObject} instances deserialized from the given stream.
*
* <p>This parses out delimited protocol buffers for raw datastore entities and then Ofy-loads
* those as {@link ImmutableObject}.
*
* <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
*/
public static Iterator<ImmutableObject> createDeserializingIterator(final InputStream input) {
return new AbstractIterator<ImmutableObject>() {
@Override
protected ImmutableObject computeNext() {
EntityProto proto = new EntityProto();
if (proto.parseDelimitedFrom(input)) { // False means end of stream; other errors throw.
return ofy().load().<ImmutableObject>fromEntity(EntityTranslator.createFromPb(proto));
}
return endOfData();
}};
}
public static ImmutableList<ImmutableObject> deserializeEntities(byte[] bytes) {
return ImmutableList.copyOf(createDeserializingIterator(new ByteArrayInputStream(bytes)));
}
}
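
As a usage sketch of the two halves above, entities can be round-tripped through the delimited format as below. This assumes an initialized Objectify context, since both directions go through ofy(), and it must live in the same package because serializeEntity is package-private; the helper class is hypothetical.

package com.google.domain.registry.backup;

import com.google.common.collect.ImmutableList;
import com.google.domain.registry.model.ImmutableObject;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

class BackupUtilsRoundTrip {
  static ImmutableList<ImmutableObject> roundTrip(Iterable<ImmutableObject> entities)
      throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (ImmutableObject entity : entities) {
      BackupUtils.serializeEntity(entity, out); // one delimited proto per entity
    }
    return BackupUtils.deserializeEntities(out.toByteArray());
  }
}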

java/com/google/domain/registry/backup/CommitLogCheckpointAction.java

@@ -1,86 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import com.googlecode.objectify.VoidWork;
import org.joda.time.DateTime;
import javax.inject.Inject;
/**
* Action that saves commit log checkpoints to datastore and kicks off a diff export task.
*
* <p>We separate computing and saving the checkpoint from exporting it because the export to GCS
* is retryable but should not require the computation of a new checkpoint. Saving the checkpoint
* and enqueuing the export task are done transactionally, so any checkpoint that is saved will be
* exported to GCS very soon.
*
* <p>This action's supported method is GET rather than POST because it gets invoked via cron.
*/
@Action(
path = "/_dr/cron/commitLogCheckpoint",
method = Action.Method.GET,
automaticallyPrintOk = true)
public final class CommitLogCheckpointAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
private static final String QUEUE_NAME = "export-commits";
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy strategy;
@Inject TaskEnqueuer taskEnqueuer;
@Inject CommitLogCheckpointAction() {}
@Override
public void run() {
final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
logger.info("Generated candidate checkpoint for time " + checkpoint.getCheckpointTime());
ofy().transact(new VoidWork() {
@Override
public void vrun() {
DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
logger.info("Newer checkpoint already written at time: " + lastWrittenTime);
return;
}
ofy().saveWithoutBackup().entities(
checkpoint,
CommitLogCheckpointRoot.create(checkpoint.getCheckpointTime()));
// Enqueue a diff task between previous and current checkpoints.
taskEnqueuer.enqueue(
getQueue(QUEUE_NAME),
withUrl(ExportCommitLogDiffAction.PATH)
.param(LOWER_CHECKPOINT_TIME_PARAM, lastWrittenTime.toString())
.param(UPPER_CHECKPOINT_TIME_PARAM, checkpoint.getCheckpointTime().toString()));
}});
}
}

java/com/google/domain/registry/backup/CommitLogCheckpointStrategy.java

@@ -1,180 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.transformValues;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.util.DateTimeUtils.END_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.earliestOf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.util.Clock;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import org.joda.time.DateTime;
import java.util.List;
import java.util.Map.Entry;
import javax.inject.Inject;
/**
* Implementation of the procedure for determining point-in-time consistent commit log checkpoint.
*
* <p>This algorithm examines the recently written commit log data and uses a dual-read approach
* to determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
* "consistent" we mean, generally speaking, that if the datastore were restored by replaying all
* the commit logs up to the checkpoint times of the buckets, the result would be transactionally
* correct; there must be no "holes" where restored state depends on non-restored state.
*
* <p>The consistency guarantee really has two parts, only one of which is provided by this
* algorithm. The procedure below guarantees only that if the resulting checkpoint includes any
* given commit log, it will also include all the commit logs that were both 1) actually written
* before that commit log "in real life", and 2) have an earlier timestamp than that commit log.
* (These criteria do not necessarily imply each other, due to the lack of a global shared clock.)
* The rest of the guarantee comes from our Ofy customizations, which ensure that any transaction
* that depends on state from a previous transaction does indeed have a later timestamp.
*
* <h2>Procedure description</h2>
* <pre>
* {@code
* ComputeCheckpoint() -> returns a set consisting of a timestamp c(b_i) for every bucket b_i
*
* 1) read off the latest commit timestamp t(b_i) for every bucket b_i
* 2) iterate over the buckets b_i a second time, and
* a) do a consistent query for the next commit timestamp t'(b_i) where t'(b_i) > t(b_i)
* b) if present, add this timestamp t'(b_i) to a set S
* 3) compute a threshold time T* representing a time before all commits in S, as follows:
* a) if S is empty, let T* = +∞ (or the "end of time")
* b) else, let T* = T - Δ, for T = min(S) and some small Δ > 0
* 4) return the set given by: min(t(b_i), T*) for all b_i
* }
* </pre>
*
* <h2>Correctness proof of algorithm</h2>
*
* <p>{@literal
* As described above, the algorithm is correct as long as it can ensure the following: given a
* commit log X written at time t(X) to bucket b_x, and another commit log Y that was written "in
* real life" before X and for which t(Y) < t(X), then if X is included in the checkpoint, so is Y;
* that is, t(X) <= c(b_x) implies t(Y) <= c(b_y).
* }
*
* <p>{@literal
* To prove this, first note that we always have c(b_i) <= t(b_i) for every b_i, i.e. every commit
* log included in the checkpoint must have been seen in the first pass. Hence if X was included,
* then X must have been written by the time we started the second pass. But since Y was written
* "in real life" prior to X, we must have seen Y by the second pass too.
* }
*
* <p>{@literal
* Now assume towards a contradiction that X is indeed included but Y is not, i.e. that we have
* t(X) <= c(b_x) but t(Y) > c(b_y). If Y was seen in the first pass, i.e. t(Y) <= t(b_y), then by
* our assumption c(b_y) < t(Y) <= t(b_y), and therefore c(b_y) != t(b_y). By the definition of
* c(b_y) it must then equal T*, so we have T* < t(Y). However, this is a contradiction since
* t(Y) < t(X) and t(X) <= c(b_x) <= T*. If instead Y was seen in the second pass but not the
* first, t'(b_y) exists and we must have t'(b_y) <= t(Y), but then since T* < T <= t'(b_y) by
* definition, we again reach the contradiction T* < t(Y).
* }
*/
class CommitLogCheckpointStrategy {
@Inject Ofy ofy;
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy() {}
/** Compute and return a new CommitLogCheckpoint for the current point in time. */
public CommitLogCheckpoint computeCheckpoint() {
DateTime checkpointTime = clock.nowUtc();
ImmutableMap<Integer, DateTime> firstPassTimes = readBucketTimestamps();
DateTime threshold = readNewCommitLogsAndFindThreshold(firstPassTimes);
return CommitLogCheckpoint.create(
checkpointTime,
computeBucketCheckpointTimes(firstPassTimes, threshold));
}
/**
* Returns a map from all bucket IDs to their current last written time values, fetched without
* a transaction so with no guarantee of consistency across buckets.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> readBucketTimestamps() {
// Use a fresh session cache so that we get the latest data from datastore.
return ofy.doWithFreshSessionCache(new Work<ImmutableMap<Integer, DateTime>>() {
@Override
public ImmutableMap<Integer, DateTime> run() {
ImmutableMap.Builder<Integer, DateTime> results = new ImmutableMap.Builder<>();
for (CommitLogBucket bucket : CommitLogBucket.loadAllBuckets()) {
results.put(bucket.getBucketNum(), bucket.getLastWrittenTime());
}
return results.build();
}});
}
/**
* Returns a threshold value defined as the latest timestamp that is before all new commit logs,
* where "new" means having a commit time after the per-bucket timestamp in the given map.
* When no such commit logs exist, the threshold value is set to END_OF_TIME.
*/
@VisibleForTesting
DateTime readNewCommitLogsAndFindThreshold(ImmutableMap<Integer, DateTime> bucketTimes) {
DateTime timeBeforeAllNewCommits = END_OF_TIME;
for (Entry<Integer, DateTime> entry : bucketTimes.entrySet()) {
Key<CommitLogBucket> bucketKey = getBucketKey(entry.getKey());
DateTime bucketTime = entry.getValue();
// Add 1 to handle START_OF_TIME since 0 isn't a valid id - filter then uses >= instead of >.
Key<CommitLogManifest> keyForFilter =
Key.create(CommitLogManifest.create(bucketKey, bucketTime.plusMillis(1), null));
List<Key<CommitLogManifest>> manifestKeys =
ofy.load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", keyForFilter)
.limit(1)
.keys()
.list();
if (!manifestKeys.isEmpty()) {
timeBeforeAllNewCommits = earliestOf(
timeBeforeAllNewCommits,
CommitLogManifest.extractCommitTime(getOnlyElement(manifestKeys)).minusMillis(1));
}
}
return timeBeforeAllNewCommits;
}
/**
* Returns the bucket checkpoint times produced by clamping the given set of bucket timestamps to
* at most the given threshold value.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> computeBucketCheckpointTimes(
ImmutableMap<Integer, DateTime> firstPassTimes,
final DateTime threshold) {
return ImmutableMap.copyOf(transformValues(firstPassTimes, new Function<DateTime, DateTime>() {
@Override
public DateTime apply(DateTime firstPassTime) {
return earliestOf(firstPassTime, threshold);
}}));
}
}
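
The thresholding arithmetic of steps 3 and 4 can be exercised in isolation. In this hypothetical sketch, buckets and commit times are reduced to plain longs, Δ is 1 ms to match the minusMillis(1) call above, and the datastore reads that produce the inputs are elided.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class CheckpointMath {
  static final long END_OF_TIME = Long.MAX_VALUE;

  // Step 3: T* is "end of time" if S is empty, else min(S) minus 1 ms.
  static long threshold(Iterable<Long> newCommitTimesS) {
    long tStar = END_OF_TIME;
    for (long s : newCommitTimesS) {
      tStar = Math.min(tStar, s - 1);
    }
    return tStar;
  }

  // Step 4: clamp each bucket's first-pass time to at most T*.
  static Map<Integer, Long> checkpointTimes(Map<Integer, Long> firstPass, long tStar) {
    Map<Integer, Long> result = new HashMap<>();
    for (Map.Entry<Integer, Long> e : firstPass.entrySet()) {
      result.put(e.getKey(), Math.min(e.getValue(), tStar));
    }
    return result;
  }

  public static void main(String[] args) {
    Map<Integer, Long> firstPass = new HashMap<>();
    firstPass.put(1, 1000L);
    firstPass.put(2, 1007L);
    long tStar = threshold(Arrays.asList(1005L));          // 1004
    System.out.println(checkpointTimes(firstPass, tStar)); // {1=1000, 2=1004}
  }
}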

java/com/google/domain/registry/backup/DeleteOldCommitLogsAction.java

@@ -1,160 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.request.Action.Method.POST;
import com.google.common.base.Function;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.model.ofy.Ofy;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Work;
import com.googlecode.objectify.cmd.Loader;
import com.googlecode.objectify.cmd.Query;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.util.List;
import javax.inject.Inject;
/**
* Task that garbage collects old {@link CommitLogManifest} entities.
*
* <p>Once commit logs have been written to GCS, we don't really need them in datastore anymore,
* except to reconstruct point-in-time snapshots of the database. But that functionality is not
* useful after a certain amount of time, e.g. thirty days. So this task runs periodically to delete
* the old data.
*
* <p>This task should be invoked in a fanout style for each {@link CommitLogBucket} ID. It then
* queries {@code CommitLogManifest} entities older than the threshold, using an ancestor query
* operating under the assumption that the ID is the transaction timestamp in
* milliseconds since the UNIX epoch. It then deletes them inside a transaction, along with their
* associated {@link CommitLogMutation} entities.
*
* <p>If additional data is left over, we show a warning at the INFO level, because it's not
* actionable. If anything, it just shows that the system was under high load thirty days ago, and
* therefore serves little use as an early warning to increase the number of buckets.
*
* <p>Before running, this task will perform an eventually consistent count query outside of a
* transaction to see how much data actually exists to delete. If it's less than a tenth of
* {@link #maxDeletes}, then we don't bother running the task. This is to minimize contention on the
* bucket and avoid wasting resources.
*
* <h3>Dimensioning</h3>
*
* <p>This entire operation runs on a single entity group, within a single transaction. Since
* there's a 10 MB upper bound on transaction size and a four-minute time limit, we can only delete
* so many commit logs at once. So given the above constraints, five hundred would make a safe
* default value for {@code maxDeletes}. See {@linkplain
* com.google.domain.registry.config.ConfigModule#provideCommitLogMaxDeletes() commitLogMaxDeletes}
* for further documentation on this matter.
*
* <p>Finally, we need to pick an appropriate cron interval time for this task. Since a bucket
* represents a single datastore entity group, it's only guaranteed to have one transaction per
* second. So we just need to divide {@code maxDeletes} by sixty to get an appropriate minute
* interval. Assuming {@code maxDeletes} is five hundred, this rounds up to ten minutes, which we'll
* double, since this task can always catch up in off-peak hours.
*
* <p>There's little harm in keeping the data around a little longer, since this task is engaged in
* a zero-sum resource struggle with the EPP transactions. Each transaction we perform here is one
* less transaction that's available to EPP. Furthermore, a well-administered system should have
* enough buckets that we'll never brush up against the 1/s entity group transaction SLA.
*/
@Action(path = "/_dr/task/deleteOldCommitLogs", method = POST, automaticallyPrintOk = true)
public final class DeleteOldCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject Clock clock;
@Inject Ofy ofy;
@Inject @Parameter("bucket") int bucketNum;
@Inject @Config("commitLogDatastoreRetention") Duration maxAge;
@Inject @Config("commitLogMaxDeletes") int maxDeletes;
@Inject DeleteOldCommitLogsAction() {}
@Override
public void run() {
if (!doesEnoughDataExistThatThisTaskIsWorthRunning()) {
return;
}
Integer deleted = ofy.transact(new Work<Integer>() {
@Override
public Integer run() {
// Load at most maxDeletes manifest keys of commit logs older than the deletion threshold.
List<Key<CommitLogManifest>> manifestKeys =
queryManifests(ofy.load())
.limit(maxDeletes)
.keys()
.list();
// transform() is lazy so copyOf() ensures all the subqueries happen in parallel, because
// the queries are launched by iterable(), put into a list, and then the list of iterables
// is consumed and concatenated.
ofy.deleteWithoutBackup().keys(concat(copyOf(transform(manifestKeys,
new Function<Key<CommitLogManifest>, Iterable<Key<CommitLogMutation>>>() {
@Override
public Iterable<Key<CommitLogMutation>> apply(Key<CommitLogManifest> manifestKey) {
return ofy.load()
.type(CommitLogMutation.class)
.ancestor(manifestKey)
.keys()
.iterable(); // launches the query asynchronously
}}))));
ofy.deleteWithoutBackup().keys(manifestKeys);
return manifestKeys.size();
}});
if (deleted == maxDeletes) {
logger.infofmt("Additional old commit logs might exist in bucket %d", bucketNum);
}
}
/** Returns the cutoff time; commit logs older than this point will be deleted. */
private DateTime getDeletionThreshold() {
return clock.nowUtc().minus(maxAge);
}
private boolean doesEnoughDataExistThatThisTaskIsWorthRunning() {
int tenth = Math.max(1, maxDeletes / 10);
int count = queryManifests(ofy.loadEventuallyConsistent())
.limit(tenth)
.count();
if (0 < count && count < tenth) {
logger.infofmt("Not enough old commit logs to bother running: %d < %d", count, tenth);
}
return count >= tenth;
}
private Query<CommitLogManifest> queryManifests(Loader loader) {
long thresholdMillis = getDeletionThreshold().getMillis();
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return loader
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey("<", Key.create(bucketKey, CommitLogManifest.class, thresholdMillis));
}
}
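
Worked numbers for the dimensioning note above, with all values assumed from the Javadoc rather than read from config (class name hypothetical):

public class DimensioningMath {
  public static void main(String[] args) {
    int maxDeletes = 500;      // assumed default per the Javadoc
    int deletesPerMinute = 60; // one entity-group transaction per second
    double minutesPerRun = (double) maxDeletes / deletesPerMinute;
    System.out.println(minutesPerRun); // ~8.33; the Javadoc rounds this up to
                                       // ten minutes, then doubles it to twenty
  }
}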

java/com/google/domain/registry/backup/ExportCommitLogDiffAction.java

@@ -1,219 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Lists.partition;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.NUM_TRANSACTIONS;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.UPPER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.BackupUtils.serializeEntity;
import static com.google.domain.registry.model.ofy.CommitLogBucket.getBucketKey;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isAtOrAfter;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import static java.nio.channels.Channels.newOutputStream;
import static java.util.Arrays.asList;
import com.google.appengine.tools.cloudstorage.GcsFileOptions;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.Key;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import javax.inject.Inject;
/** Action that exports the diff between two commit log checkpoints to GCS. */
@Action(
path = ExportCommitLogDiffAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public final class ExportCommitLogDiffAction implements Runnable {
private static final FormattingLogger logger = getLoggerForCallerClass();
static final String PATH = "/_dr/task/exportCommitLogDiff";
static final String UPPER_CHECKPOINT_TIME_PARAM = "upperCheckpointTime";
static final String LOWER_CHECKPOINT_TIME_PARAM = "lowerCheckpointTime";
public static final String DIFF_FILE_PREFIX = "commit_diff_until_";
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Config("commitLogDiffExportBatchSize") int batchSize;
@Inject @Parameter(LOWER_CHECKPOINT_TIME_PARAM) DateTime lowerCheckpointTime;
@Inject @Parameter(UPPER_CHECKPOINT_TIME_PARAM) DateTime upperCheckpointTime;
@Inject ExportCommitLogDiffAction() {}
@Override
public void run() {
checkArgument(isAtOrAfter(lowerCheckpointTime, START_OF_TIME));
checkArgument(lowerCheckpointTime.isBefore(upperCheckpointTime));
// Load the boundary checkpoints - lower is exclusive and may not exist (on the first export,
// when lowerCheckpointTime is START_OF_TIME), whereas the upper is inclusive and must exist.
CommitLogCheckpoint lowerCheckpoint = lowerCheckpointTime.isAfter(START_OF_TIME)
? verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(lowerCheckpointTime)).now())
: null;
CommitLogCheckpoint upperCheckpoint =
verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(upperCheckpointTime)).now());
// Load the keys of all the manifests to include in this diff.
List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
logger.infofmt("Found %d manifests to export", sortedKeys.size());
// Open an output channel to GCS, wrapped in a stream for convenience.
try (OutputStream gcsStream = newOutputStream(gcsService.createOrReplace(
new GcsFilename(gcsBucket, DIFF_FILE_PREFIX + upperCheckpointTime),
new GcsFileOptions.Builder()
.addUserMetadata(LOWER_BOUND_CHECKPOINT, lowerCheckpointTime.toString())
.addUserMetadata(UPPER_BOUND_CHECKPOINT, upperCheckpointTime.toString())
.addUserMetadata(NUM_TRANSACTIONS, Integer.toString(sortedKeys.size()))
.build()))) {
// Export the upper checkpoint itself.
serializeEntity(upperCheckpoint, gcsStream);
// If there are no manifests to export, stop early, now that we've written out the file with
// the checkpoint itself (which is needed for restores, even if it's empty).
if (sortedKeys.isEmpty()) {
return;
}
// Export to GCS in chunks, one per fixed batch of commit logs. While processing one batch,
// asynchronously load the entities for the next one.
List<List<Key<CommitLogManifest>>> keyChunks = partition(sortedKeys, batchSize);
// Objectify's map return type is asynchronous. Calling .values() will block until it loads.
Map<?, CommitLogManifest> nextChunkToExport = ofy().load().keys(keyChunks.get(0));
for (int i = 0; i < keyChunks.size(); i++) {
// Force the async load to finish.
Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
logger.infofmt("Loaded %d manifests", chunkValues.size());
// Since there is no hard bound on how much data this might be, take care not to let the
// Objectify session cache fill up and potentially run out of memory. This is the only safe
// point to do this since at this point there is no async load in progress.
ofy().clearSessionCache();
// Kick off the next async load, which can happen in parallel to the current GCS export.
if (i + 1 < keyChunks.size()) {
nextChunkToExport = ofy().load().keys(keyChunks.get(i + 1));
}
exportChunk(gcsStream, chunkValues);
logger.infofmt("Exported %d manifests", chunkValues.size());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
logger.infofmt("Exported %d manifests in total", sortedKeys.size());
}
/**
* Loads all the diff keys, sorted in a transaction-consistent chronological order.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
*/
private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
@Nullable final CommitLogCheckpoint lowerCheckpoint,
final CommitLogCheckpoint upperCheckpoint) {
// Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
// transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
// CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
// a deterministic order.
return FluentIterable.from(upperCheckpoint.getBucketTimestamps().keySet())
.transformAndConcat(new Function<Integer, Iterable<Key<CommitLogManifest>>>() {
@Override
public Iterable<Key<CommitLogManifest>> apply(Integer bucketNum) {
return loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum);
}})
.toSortedList(new Comparator<Key<CommitLogManifest>>() {
@Override
public int compare(Key<CommitLogManifest> a, Key<CommitLogManifest> b) {
// Compare keys by timestamp (which is encoded in the id as millis), then by bucket id.
return ComparisonChain.start()
.compare(a.getId(), b.getId())
.compare(a.getParent().getId(), b.getParent().getId())
.result();
}});
}
/**
* Loads the diff keys for one bucket.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
* @param bucketNum the bucket to load diff keys from
*/
private Iterable<Key<CommitLogManifest>> loadDiffKeysFromBucket(
@Nullable CommitLogCheckpoint lowerCheckpoint,
CommitLogCheckpoint upperCheckpoint,
int bucketNum) {
// If no lower checkpoint exists, use START_OF_TIME as the effective exclusive lower bound.
DateTime lowerCheckpointBucketTime = lowerCheckpoint == null
? START_OF_TIME
: lowerCheckpoint.getBucketTimestamps().get(bucketNum);
// Since START_OF_TIME=0 is not a valid id in a key, add 1 to both bounds. Then instead of
// loading lowerBound < x <= upperBound, we can load lowerBound <= x < upperBound.
DateTime lowerBound = lowerCheckpointBucketTime.plusMillis(1);
DateTime upperBound = upperCheckpoint.getBucketTimestamps().get(bucketNum).plusMillis(1);
// If the lower and upper bounds are equal, there can't be any results, so skip the query.
if (lowerBound.equals(upperBound)) {
return ImmutableSet.of();
}
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return ofy().load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", CommitLogManifest.createKey(bucketKey, lowerBound))
.filterKey("<", CommitLogManifest.createKey(bucketKey, upperBound))
.keys();
}
/** Writes a chunk's worth of manifests and associated mutations to GCS. */
private void exportChunk(OutputStream gcsStream, Collection<CommitLogManifest> chunk)
throws IOException {
// Kick off async loads for all the manifests in the chunk.
ImmutableList.Builder<Iterable<? extends ImmutableObject>> entities =
new ImmutableList.Builder<>();
for (CommitLogManifest manifest : chunk) {
entities.add(asList(manifest));
entities.add(ofy().load().type(CommitLogMutation.class).ancestor(manifest));
}
for (ImmutableObject entity : concat(entities.build())) {
serializeEntity(entity, gcsStream);
}
}
}
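
The export loop overlaps GCS writes with datastore reads by always keeping the next chunk's load in flight. A standalone toy of that double-buffering shape (names hypothetical; a Callable stands in for the async Objectify load):

import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class DoubleBufferDemo {
  public static void main(String[] args) {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    List<List<Integer>> chunks = Lists.partition(Arrays.asList(1, 2, 3, 4, 5), 2);
    ListenableFuture<List<Integer>> next = load(executor, chunks.get(0));
    for (int i = 0; i < chunks.size(); i++) {
      List<Integer> current = Futures.getUnchecked(next); // block on chunk i
      if (i + 1 < chunks.size()) {
        next = load(executor, chunks.get(i + 1));         // prefetch chunk i+1
      }
      System.out.println("exporting " + current);         // synchronous work
    }
    executor.shutdown();
  }

  private static ListenableFuture<List<Integer>> load(
      ListeningExecutorService executor, final List<Integer> chunk) {
    return executor.submit(new Callable<List<Integer>>() {
      @Override
      public List<Integer> call() {
        return chunk; // stand-in for an asynchronous datastore load
      }});
  }
}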

java/com/google/domain/registry/backup/GcsDiffFileLister.java

@@ -1,128 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkState;
import static com.google.domain.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static com.google.domain.registry.backup.ExportCommitLogDiffAction.DIFF_FILE_PREFIX;
import static com.google.domain.registry.util.DateTimeUtils.START_OF_TIME;
import static com.google.domain.registry.util.DateTimeUtils.isBeforeOrAt;
import static com.google.domain.registry.util.DateTimeUtils.latestOf;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.appengine.tools.cloudstorage.ListItem;
import com.google.appengine.tools.cloudstorage.ListOptions;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.domain.registry.backup.BackupModule.Backups;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.DateTime;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Utility class to list commit log diff files stored on GCS. */
class GcsDiffFileLister {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Backups ListeningExecutorService executor;
@Inject GcsDiffFileLister() {}
List<GcsFileMetadata> listDiffFiles(DateTime fromTime) {
logger.info("Requested restore from time: " + fromTime);
// List all of the diff files on GCS and build a map from each file's upper checkpoint time
// (extracted from the filename) to its asynchronously-loaded metadata, keeping only files with
// an upper checkpoint time > fromTime.
Map<DateTime, ListenableFuture<GcsFileMetadata>> upperBoundTimesToMetadata = new HashMap<>();
Iterator<ListItem> listItems;
try {
// TODO(b/23554360): Use a smarter prefixing strategy to speed this up.
listItems = gcsService.list(
gcsBucket,
new ListOptions.Builder().setPrefix(DIFF_FILE_PREFIX).build());
} catch (IOException e) {
throw new RuntimeException(e);
}
DateTime lastUpperBoundTime = START_OF_TIME;
while (listItems.hasNext()) {
final String filename = listItems.next().getName();
DateTime upperBoundTime = DateTime.parse(filename.substring(DIFF_FILE_PREFIX.length()));
if (isBeforeOrAt(fromTime, upperBoundTime)) {
upperBoundTimesToMetadata.put(upperBoundTime, executor.submit(
new Callable<GcsFileMetadata>() {
@Override
public GcsFileMetadata call() throws Exception {
return getMetadata(filename);
}}));
}
lastUpperBoundTime = latestOf(upperBoundTime, lastUpperBoundTime);
}
if (upperBoundTimesToMetadata.isEmpty()) {
logger.info("No files found");
return ImmutableList.of();
}
// GCS file listing is eventually consistent, so it's possible that we are missing a file. The
// metadata of a file is sufficient to identify the preceding file, so if we start from the
// last file and work backwards we can verify that we have no holes in our chain (although we
// may be missing files at the end).
ImmutableList.Builder<GcsFileMetadata> filesBuilder = new ImmutableList.Builder<>();
logger.info("Restoring until: " + lastUpperBoundTime);
DateTime checkpointTime = lastUpperBoundTime;
while (checkpointTime.isAfter(fromTime)) {
GcsFileMetadata metadata;
if (upperBoundTimesToMetadata.containsKey(checkpointTime)) {
metadata = Futures.getUnchecked(upperBoundTimesToMetadata.get(checkpointTime));
} else {
String filename = DIFF_FILE_PREFIX + checkpointTime;
logger.info("Patching GCS list; discovered file " + filename);
metadata = getMetadata(filename);
checkState(metadata != null, "Could not read metadata for file %s", filename);
}
filesBuilder.add(metadata);
checkpointTime = getLowerBoundTime(metadata);
}
ImmutableList<GcsFileMetadata> files = filesBuilder.build().reverse();
logger.info("Actual restore from time: " + getLowerBoundTime(files.get(0)));
logger.infofmt("Found %d files to restore", files.size());
return files;
}
private DateTime getLowerBoundTime(GcsFileMetadata metadata) {
return DateTime.parse(metadata.getOptions().getUserMetadata().get(LOWER_BOUND_CHECKPOINT));
}
private GcsFileMetadata getMetadata(String filename) {
try {
return gcsService.getMetadata(new GcsFilename(gcsBucket, filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
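
The hole-checking walk reduces to following each file's lower-bound pointer backwards from the newest file, prepending as it goes. A toy version (names hypothetical); note that a broken chain would surface here as a NullPointerException, much as the real code fails via checkState:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

public class ChainWalkDemo {
  public static void main(String[] args) {
    // Each diff file records its predecessor's upper bound in metadata;
    // model that as a map of upper bound -> lower bound.
    Map<Long, Long> lowerBoundOf = new HashMap<>();
    lowerBoundOf.put(30L, 20L); // file "30" covers (20, 30]
    lowerBoundOf.put(20L, 10L); // file "20" covers (10, 20]
    lowerBoundOf.put(10L, 0L);  // file "10" covers (0, 10]
    long fromTime = 0L;
    long newest = 30L;
    Deque<Long> restoreOrder = new ArrayDeque<>();
    for (long t = newest; t > fromTime; t = lowerBoundOf.get(t)) {
      restoreOrder.addFirst(t); // prepend, so the result is oldest-first
    }
    System.out.println(restoreOrder); // [10, 20, 30]
  }
}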

java/com/google/domain/registry/backup/RestoreCommitLogsAction.java

@@ -1,207 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Iterators.peekingIterator;
import static com.google.domain.registry.backup.BackupUtils.createDeserializingIterator;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static java.util.Arrays.asList;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.base.Function;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.model.ofy.CommitLogCheckpoint;
import com.google.domain.registry.model.ofy.CommitLogCheckpointRoot;
import com.google.domain.registry.model.ofy.CommitLogManifest;
import com.google.domain.registry.model.ofy.CommitLogMutation;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.Retrier;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Result;
import com.googlecode.objectify.util.ResultNow;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
/** Restore Registry 2 commit logs from GCS to datastore. */
@Action(
path = RestoreCommitLogsAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true)
public class RestoreCommitLogsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final int BLOCK_SIZE = 1024 * 1024; // Buffer 1 MB at a time, for no particular reason.
static final String PATH = "/_dr/task/restoreCommitLogs";
static final String DRY_RUN_PARAM = "dryRun";
static final String FROM_TIME_PARAM = "fromTime";
@Inject GcsService gcsService;
@Inject @Parameter(DRY_RUN_PARAM) boolean dryRun;
@Inject @Parameter(FROM_TIME_PARAM) DateTime fromTime;
@Inject DatastoreService datastoreService;
@Inject GcsDiffFileLister diffLister;
@Inject Retrier retrier;
@Inject RestoreCommitLogsAction() {}
@Override
public void run() {
checkArgument( // safety
RegistryEnvironment.get() == RegistryEnvironment.ALPHA
|| RegistryEnvironment.get() == RegistryEnvironment.UNITTEST,
"DO NOT RUN ANYWHERE ELSE EXCEPT ALPHA OR TESTS.");
if (dryRun) {
logger.info("Running in dryRun mode");
}
List<GcsFileMetadata> diffFiles = diffLister.listDiffFiles(fromTime);
if (diffFiles.isEmpty()) {
logger.info("Nothing to restore");
return;
}
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
CommitLogCheckpoint lastCheckpoint = null;
for (GcsFileMetadata metadata : diffFiles) {
logger.info("Restoring: " + metadata.getFilename().getObjectName());
try (InputStream input = Channels.newInputStream(
gcsService.openPrefetchingReadChannel(metadata.getFilename(), 0, BLOCK_SIZE))) {
PeekingIterator<ImmutableObject> commitLogs =
peekingIterator(createDeserializingIterator(input));
lastCheckpoint = (CommitLogCheckpoint) commitLogs.next();
saveOfy(asList(lastCheckpoint)); // Save the checkpoint itself.
while (commitLogs.hasNext()) {
CommitLogManifest manifest = restoreOneTransaction(commitLogs);
bucketTimestamps.put(manifest.getBucketId(), manifest.getCommitTime());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
// Restore the CommitLogCheckpointRoot and CommitLogBuckets.
saveOfy(FluentIterable.from(bucketTimestamps.entrySet())
.transform(new Function<Entry<Integer, DateTime>, ImmutableObject>() {
@Override
public ImmutableObject apply(Entry<Integer, DateTime> entry) {
return new CommitLogBucket.Builder()
.setBucketNum(entry.getKey())
.setLastWrittenTime(entry.getValue())
.build();
}})
.append(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())));
}
/**
* Restore the contents of one transaction to datastore.
*
* <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
* object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
* restore by deleting the deletes and recreating the saves from their proto form. We also save
* the commit logs themselves back to datastore, so that the commit log system itself is
* transparently restored alongside the data.
*
* @return the manifest, for use in restoring the {@link CommitLogBucket}.
*/
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
Result<?> deleteResult = deleteAsync(manifest.getDeletions());
List<Entity> entitiesToSave = Lists.newArrayList(ofy().save().toEntity(manifest));
while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
entitiesToSave.add(ofy().save().toEntity(mutation));
entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
}
saveRaw(entitiesToSave);
try {
deleteResult.now();
} catch (Exception e) {
retry(new Runnable() {
@Override
public void run() {
deleteAsync(manifest.getDeletions()).now();
}});
}
return manifest;
}
private void saveRaw(final List<Entity> entitiesToSave) {
if (dryRun) {
logger.info("Would have saved " + entitiesToSave);
return;
}
retry(new Runnable() {
@Override
public void run() {
datastoreService.put(entitiesToSave);
}});
}
private void saveOfy(final Iterable<? extends ImmutableObject> objectsToSave) {
if (dryRun) {
logger.info("Would have saved " + asList(objectsToSave));
return;
}
retry(new Runnable() {
@Override
public void run() {
ofy().saveWithoutBackup().entities(objectsToSave).now();
}});
}
private Result<?> deleteAsync(Set<Key<?>> keysToDelete) {
if (dryRun) {
logger.info("Would have deleted " + keysToDelete);
}
return dryRun || keysToDelete.isEmpty()
? new ResultNow<Void>(null)
: ofy().deleteWithoutBackup().entities(keysToDelete);
}
/** Retrier for saves and deletes, since we can't proceed with any failures. */
private void retry(final Runnable runnable) {
retrier.callWithRetry(
new Callable<Void>() {
@Override
public Void call() throws Exception {
runnable.run();
return null;
}},
RuntimeException.class);
}
}
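
restoreOneTransaction leans on a peeking iterator to group each manifest with the mutations that follow it in the stream. The idiom in isolation (hypothetical demo; strings stand in for manifests, integers for mutations):

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.Arrays;

public class PeekGroupingDemo {
  public static void main(String[] args) {
    PeekingIterator<Object> it = Iterators.peekingIterator(
        Arrays.<Object>asList("manifest-1", 1, 2, "manifest-2", 3).iterator());
    while (it.hasNext()) {
      Object manifest = it.next(); // header element
      StringBuilder line = new StringBuilder(manifest.toString());
      // Consume group members while the next element is still a "mutation".
      while (it.hasNext() && it.peek() instanceof Integer) {
        line.append(' ').append(it.next());
      }
      System.out.println(line); // "manifest-1 1 2", then "manifest-2 3"
    }
  }
}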

java/com/google/domain/registry/backup/package-info.java

@@ -1,16 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.backup;

java/com/google/domain/registry/bigquery/BUILD

@@ -1,32 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "bigquery",
srcs = glob(["*.java"]),
visibility = ["//visibility:public"],
deps = [
"//apiserving/discoverydata/bigquery:bigqueryv2",
"//java/com/google/api/client/extensions/appengine/http",
"//java/com/google/api/client/googleapis/auth/oauth2",
"//java/com/google/api/client/googleapis/extensions/appengine/auth/oauth2",
"//java/com/google/api/client/googleapis/json",
"//java/com/google/api/client/http",
"//java/com/google/api/client/json",
"//java/com/google/api/client/json/jackson2",
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/io",
"//java/com/google/common/util/concurrent",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

java/com/google/domain/registry/bigquery/BigqueryConnection.java

@@ -1,775 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.base.Verify.verify;
import static com.google.domain.registry.bigquery.BigqueryUtils.toJobReferenceString;
import static org.joda.time.DateTimeZone.UTC;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.client.http.AbstractInputStreamContent;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.GetQueryResultsResponse;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobConfiguration;
import com.google.api.services.bigquery.model.JobConfigurationExtract;
import com.google.api.services.bigquery.model.JobConfigurationLoad;
import com.google.api.services.bigquery.model.JobConfigurationQuery;
import com.google.api.services.bigquery.model.JobReference;
import com.google.api.services.bigquery.model.JobStatistics;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableCell;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableRow;
import com.google.api.services.bigquery.model.ViewDefinition;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableTable;
import com.google.common.io.BaseEncoding;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.domain.registry.bigquery.BigqueryUtils.DestinationFormat;
import com.google.domain.registry.bigquery.BigqueryUtils.SourceFormat;
import com.google.domain.registry.bigquery.BigqueryUtils.TableType;
import com.google.domain.registry.bigquery.BigqueryUtils.WriteDisposition;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.domain.registry.util.Sleeper;
import com.google.domain.registry.util.SqlTemplate;
import com.google.domain.registry.util.SystemSleeper;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
/** Class encapsulating parameters and state for accessing the Bigquery API. */
public class BigqueryConnection implements AutoCloseable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
private static final Duration MIN_POLL_INTERVAL = Duration.millis(500);
@NonFinalForTesting
private static Sleeper sleeper = new SystemSleeper();
/** Default name of the default dataset to use for requests to the API. */
public static final String DEFAULT_DATASET_NAME = "testing";
/** Default dataset to use for storing temporary tables. */
private static final String TEMP_DATASET_NAME = "__temp__";
/** Default time to live for temporary tables. */
private static final Duration TEMP_TABLE_TTL = Duration.standardHours(24);
/** Bigquery client instance wrapped by this class. */
private Bigquery bigquery;
/** Executor service for bigquery jobs. */
private ListeningExecutorService service;
/** Credential object to use for initializing HTTP requests to the bigquery API. */
private HttpRequestInitializer credential;
/** HTTP transport object to use for accessing bigquery API. */
private HttpTransport httpTransport;
/** JSON factory object to use for accessing bigquery API. */
private JsonFactory jsonFactory;
/** Pseudo-randomness source to use for creating random table names. */
private Random random = new Random();
/** Name of the default dataset to use for inserting tables. */
private String datasetId = DEFAULT_DATASET_NAME;
/** Whether to automatically overwrite existing tables and views. */
private boolean overwrite = false;
/** Duration to wait between polls for job status. */
private Duration pollInterval = Duration.millis(1000);
/** Builder for a {@link BigqueryConnection}, since the latter is immutable once created. */
public static class Builder {
private BigqueryConnection instance;
public Builder() {
instance = new BigqueryConnection();
}
/**
* The BigqueryConnection takes ownership of this {@link ExecutorService} and will
* shut it down when the BigqueryConnection is closed.
*/
public Builder setExecutorService(ExecutorService executorService) {
instance.service = MoreExecutors.listeningDecorator(executorService);
return this;
}
public Builder setCredential(GoogleCredential credential) {
instance.credential = checkNotNull(credential);
instance.httpTransport = credential.getTransport();
instance.jsonFactory = credential.getJsonFactory();
return this;
}
public Builder setDatasetId(String datasetId) {
instance.datasetId = checkNotNull(datasetId);
return this;
}
public Builder setOverwrite(boolean overwrite) {
instance.overwrite = overwrite;
return this;
}
public Builder setPollInterval(Duration pollInterval) {
checkArgument(
!pollInterval.isShorterThan(MIN_POLL_INTERVAL),
"poll interval must be at least %ldms", MIN_POLL_INTERVAL.getMillis());
instance.pollInterval = pollInterval;
return this;
}
public BigqueryConnection build() {
try {
checkNotNull(instance.service, "Must provide executor service");
return instance;
} finally {
// Clear the internal instance so you can't accidentally mutate it through this builder.
instance = null;
}
}
}
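// Illustrative construction sketch (not part of the original file): a typical build
// sequence, assuming a GoogleCredential obtained elsewhere and a hypothetical
// "reporting" dataset.
//
//   BigqueryConnection bigquery = new BigqueryConnection.Builder()
//       .setExecutorService(Executors.newFixedThreadPool(4))
//       .setCredential(credential)
//       .setDatasetId("reporting")
//       .setOverwrite(true)
//       .setPollInterval(Duration.standardSeconds(1))
//       .build()
//       .initialize();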
/**
* Class that wraps a normal Bigquery API Table object to make it immutable from the client side
* and give it additional semantics as a "destination" for load or query jobs, with an overwrite
* flag set by the client upon creation.
* <p>
* Additionally provides encapsulation so that clients of BigqueryConnection don't need to take
* any direct dependencies on Bigquery API classes and can instead use DestinationTable.
*/
public static class DestinationTable {
/** The wrapped Bigquery API Table object. */
private final Table table;
/** The type of this table. */
private final TableType type;
/** The write disposition for jobs writing to this destination table. */
private final WriteDisposition writeDisposition;
/**
* A query to package with this table if the type is VIEW; not immutable but also not visible
* to clients.
*/
private String query;
/** A builder for DestinationTable. */
public static final class Builder {
private final Table table = new Table();
private final TableReference tableRef = new TableReference();
private TableType type = TableType.TABLE;
private WriteDisposition writeDisposition = WriteDisposition.WRITE_EMPTY;
public Builder datasetId(String datasetId) {
tableRef.setDatasetId(datasetId);
return this;
}
public Builder name(String name) {
tableRef.setTableId(name);
return this;
}
public Builder description(String description) {
table.setDescription(description);
return this;
}
public Builder type(TableType type) {
this.type = type;
return this;
}
public Builder timeToLive(Duration duration) {
this.table.setExpirationTime(new DateTime(UTC).plus(duration).getMillis());
return this;
}
public Builder overwrite(boolean overwrite) {
if (overwrite) {
this.writeDisposition = WriteDisposition.WRITE_TRUNCATE;
}
return this;
}
public Builder append(boolean append) {
if (append) {
this.writeDisposition = WriteDisposition.WRITE_APPEND;
}
return this;
}
public DestinationTable build() {
tableRef.setProjectId(getEnvironmentProjectId());
table.setTableReference(tableRef);
checkState(!isNullOrEmpty(table.getTableReference().getDatasetId()));
checkState(!isNullOrEmpty(table.getTableReference().getTableId()));
return new DestinationTable(this);
}
}
/** Constructs a new DestinationTable from its Builder. */
private DestinationTable(Builder b) {
table = b.table.clone();
type = b.type;
writeDisposition = b.writeDisposition;
}
/**
* Stores the provided query with this DestinationTable and returns it; used for packaging
* a query along with the DestinationTable before sending it to the table update logic.
*/
private DestinationTable withQuery(String query) {
checkState(type == TableType.VIEW);
this.query = query;
return this;
}
/** Returns a new copy of the Bigquery API Table object wrapped by this DestinationTable. */
private Table getTable() {
Table tableCopy = table.clone();
if (type == TableType.VIEW) {
tableCopy.setView(new ViewDefinition().setQuery(query));
}
return tableCopy;
}
/** Returns the write disposition that should be used for jobs writing to this table. */
private WriteDisposition getWriteDisposition() {
return writeDisposition;
}
/** Returns a new copy of the TableReference for the Table wrapped by this DestinationTable. */
private TableReference getTableReference() {
return table.getTableReference().clone();
}
/** Returns a string representation of the TableReference for the wrapped table. */
public String getStringReference() {
return tableReferenceToString(table.getTableReference());
}
/** Returns a string representation of the given TableReference. */
private static String tableReferenceToString(TableReference tableRef) {
return String.format(
"%s:%s.%s",
tableRef.getProjectId(),
tableRef.getDatasetId(),
tableRef.getTableId());
}
}
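// Illustrative sketch (dataset and table names hypothetical): building a view
// destination directly with the builder above.
//
//   DestinationTable view = new DestinationTable.Builder()
//       .datasetId("reporting")
//       .name("active_domains")
//       .type(TableType.VIEW)
//       .description("Currently active domains")
//       .build();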
/**
* Initializes the BigqueryConnection object by setting up the API client and creating the
* default and temporary datasets if they don't exist.
*/
public BigqueryConnection initialize() throws Exception {
bigquery = new Bigquery.Builder(httpTransport, jsonFactory, credential)
.setApplicationName(getClass().getSimpleName())
.build();
createDatasetIfNeeded(datasetId);
createDatasetIfNeeded(TEMP_DATASET_NAME);
return this;
}
/**
* Closes the BigqueryConnection object by shutting down the executor service. Clients
* should only call this after all ListenableFutures obtained from BigqueryConnection methods
* have resolved; this method does not block on their completion.
*/
@Override
public void close() {
service.shutdown();
}
/** Returns a partially built DestinationTable with the default dataset and overwrite behavior. */
public DestinationTable.Builder buildDestinationTable(String tableName) {
return new DestinationTable.Builder()
.datasetId(datasetId)
.type(TableType.TABLE)
.name(tableName)
.overwrite(overwrite);
}
/**
* Returns a partially built DestinationTable with a randomly generated name under the default
* temporary table dataset, with the default TTL and overwrite behavior.
*/
public DestinationTable.Builder buildTemporaryTable() {
return new DestinationTable.Builder()
.datasetId(TEMP_DATASET_NAME)
.type(TableType.TABLE)
.name(getRandomTableName())
.timeToLive(TEMP_TABLE_TTL)
.overwrite(overwrite);
}
/** Returns a random table name consisting only of the chars {@code [a-v0-9_]}. */
private String getRandomTableName() {
byte[] randBytes = new byte[8]; // 64 bits of randomness ought to be plenty.
random.nextBytes(randBytes);
return "_" + BaseEncoding.base32Hex().lowerCase().omitPadding().encode(randBytes);
}
/**
* A function that updates the specified Bigquery table to reflect the metadata from the input
* DestinationTable, passing the same DestinationTable through as the output. If the specified
* table does not already exist, it will be inserted into the dataset.
* <p>
* Clients can call this function directly to update a table on demand, or can pass it to
* Futures.transform() to update a table produced as the asynchronous result of a load or query
* job (e.g. to add a description to it).
*/
private class UpdateTableFunction implements Function<DestinationTable, DestinationTable> {
@Override
public DestinationTable apply(final DestinationTable destinationTable) {
Table table = destinationTable.getTable();
TableReference ref = table.getTableReference();
try {
if (checkTableExists(ref.getDatasetId(), ref.getTableId())) {
bigquery.tables()
.update(ref.getProjectId(), ref.getDatasetId(), ref.getTableId(), table)
.execute();
} else {
bigquery.tables()
.insert(ref.getProjectId(), ref.getDatasetId(), table)
.execute();
}
return destinationTable;
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
}
/**
* Starts an asynchronous load job to populate the specified destination table with the given
* source URIs and source format. Returns a ListenableFuture that holds the same destination
* table object on success.
*/
public ListenableFuture<DestinationTable> load(
DestinationTable dest,
SourceFormat sourceFormat,
Iterable<String> sourceUris) throws Exception {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setLoad(new JobConfigurationLoad()
.setWriteDisposition(dest.getWriteDisposition().toString())
.setSourceFormat(sourceFormat.toString())
.setSourceUris(ImmutableList.copyOf(sourceUris))
.setDestinationTable(dest.getTableReference())));
return Futures.transform(runJobToCompletion(job, dest), new UpdateTableFunction());
}
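// Illustrative usage of load() (bucket and table names hypothetical): load
// newline-delimited JSON files from GCS into a fresh destination table.
//
//   ListenableFuture<DestinationTable> loaded = load(
//       buildDestinationTable("imported_rows").build(),
//       SourceFormat.NEWLINE_DELIMITED_JSON,
//       ImmutableList.of("gs://my-bucket/rows-*.json"));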
/**
* Starts an asynchronous query job to populate the specified destination table with the results
* of the specified query, or if the table is a view, to update the view to reflect that query.
* Returns a ListenableFuture that holds the same destination table object on success.
*/
public ListenableFuture<DestinationTable> query(
String querySql,
DestinationTable dest) {
if (dest.type == TableType.VIEW) {
// Use Futures.transform() rather than calling apply() directly so that any exceptions thrown
// by calling UpdateTableFunction will be propagated on the get() call, not from here.
return Futures.transform(
Futures.immediateFuture(dest.withQuery(querySql)), new UpdateTableFunction());
} else {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())
.setWriteDisposition(dest.getWriteDisposition().toString())
.setDestinationTable(dest.getTableReference())));
return Futures.transform(runJobToCompletion(job, dest), new UpdateTableFunction());
}
}
/**
* Starts an asynchronous query job to dump the results of the specified query into a local
* ImmutableTable object, row-keyed by the row number (indexed from 1), column-keyed by the
* TableFieldSchema for that column, and with the value object as the cell value. Note that null
* values will not actually be null, but they can be checked for using Data.isNull().
* <p>
* Returns a ListenableFuture that holds the ImmutableTable on success.
*/
public ListenableFuture<ImmutableTable<Integer, TableFieldSchema, Object>>
queryToLocalTable(String querySql) throws Exception {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())));
return Futures.transform(
runJobToCompletion(job),
new Function<Job, ImmutableTable<Integer, TableFieldSchema, Object>>() {
@Override
public ImmutableTable<Integer, TableFieldSchema, Object> apply(Job job) {
return getQueryResults(job);
}});
}
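// Illustrative sketch for consuming the materialized result (query hypothetical);
// note that SQL NULL cells must be detected with Data.isNull(), not a == null check.
//
//   ImmutableTable<Integer, TableFieldSchema, Object> rows =
//       queryToLocalTable("SELECT tld FROM [reporting.domains]").get();
//   Object cell = rows.get(1, rows.columnKeySet().iterator().next());
//   boolean wasNull = com.google.api.client.util.Data.isNull(cell);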
/**
* Returns the query results for the given job as an ImmutableTable, row-keyed by row number
* (indexed from 1), column-keyed by the TableFieldSchema for that field, and with the value
* object as the cell value. Note that null values will not actually be null (since we're using
* ImmutableTable) but they can be checked for using Data.isNull().
* <p>
* This table is fully materialized in memory (not lazily loaded), so it should not be used with
* queries expected to return large results.
*/
private ImmutableTable<Integer, TableFieldSchema, Object> getQueryResults(Job job) {
try {
ImmutableTable.Builder<Integer, TableFieldSchema, Object> builder =
new ImmutableTable.Builder<>();
String pageToken = null;
int rowNumber = 1;
while (true) {
GetQueryResultsResponse queryResults = bigquery.jobs()
.getQueryResults(getProjectId(), job.getJobReference().getJobId())
.setPageToken(pageToken)
.execute();
// If the job isn't complete yet, retry; getQueryResults() waits for up to 10 seconds on
// each invocation so this will effectively poll for completion.
if (queryResults.getJobComplete()) {
List<TableFieldSchema> schemaFields = queryResults.getSchema().getFields();
for (TableRow row : queryResults.getRows()) {
Iterator<TableFieldSchema> fieldIterator = schemaFields.iterator();
Iterator<TableCell> cellIterator = row.getF().iterator();
while (fieldIterator.hasNext() && cellIterator.hasNext()) {
builder.put(rowNumber, fieldIterator.next(), cellIterator.next().getV());
}
rowNumber++;
}
pageToken = queryResults.getPageToken();
if (pageToken == null) {
break;
}
}
}
return builder.build();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Starts an asynchronous job to extract the specified source table and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
private ListenableFuture<String> extractTable(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
checkArgument(sourceTable.type == TableType.TABLE);
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setExtract(new JobConfigurationExtract()
.setSourceTable(sourceTable.getTableReference())
.setDestinationFormat(destinationFormat.toString())
.setDestinationUris(ImmutableList.of(destinationUri))
.setPrintHeader(printHeader)));
return runJobToCompletion(job, destinationUri);
}
/**
* Starts an asynchronous job to extract the specified source table or view and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extract(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
if (sourceTable.type == TableType.TABLE) {
return extractTable(sourceTable, destinationUri, destinationFormat, printHeader);
} else {
// We can't extract directly from a view, so instead extract from a query dumping that view.
return extractQuery(
SqlTemplate
.create("SELECT * FROM [%DATASET%.%TABLE%]")
.put("DATASET", sourceTable.getTableReference().getDatasetId())
.put("TABLE", sourceTable.getTableReference().getTableId())
.build(),
destinationUri,
destinationFormat,
printHeader);
}
}
/**
* Starts an asynchronous job to run the provided query, store the results in a temporary table,
* and then extract the contents of that table to the given GCS filepath in the specified
* destination format, optionally printing headers.
* <p>
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extractQuery(
String querySql,
final String destinationUri,
final DestinationFormat destinationFormat,
final boolean printHeader) {
// Note: although BigQuery queries save their results to an auto-generated anonymous table,
// we can't rely on that for running the extract job because it may not be fully replicated.
// Tracking bug for query-to-GCS support is b/13777340.
DestinationTable tempTable = buildTemporaryTable().build();
return Futures.transformAsync(
query(querySql, tempTable), new AsyncFunction<DestinationTable, String>() {
@Override
public ListenableFuture<String> apply(DestinationTable tempTable) {
return extractTable(tempTable, destinationUri, destinationFormat, printHeader);
}
});
}
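// Illustrative usage (query and destination URI hypothetical): dump query results
// to GCS as CSV with a header row.
//
//   ListenableFuture<String> uri = extractQuery(
//       "SELECT tld, COUNT(*) FROM [reporting.domains] GROUP BY tld",
//       "gs://my-bucket/tld-counts.csv",
//       DestinationFormat.CSV,
//       true);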
/** @see #runJob(Job, AbstractInputStreamContent) */
public Job runJob(Job job) {
return runJob(job, null);
}
/**
* Launch a job, wait for it to complete, and check the completed job for errors.
*
* @throws BigqueryJobFailureException
*/
public Job runJob(Job job, @Nullable AbstractInputStreamContent data) {
return checkJob(waitForJob(launchJob(job, data)));
}
/**
* Launch a job, but do not wait for it to complete.
*
* @throws BigqueryJobFailureException
*/
private Job launchJob(Job job, @Nullable AbstractInputStreamContent data) {
verify(job.getStatus() == null);
try {
return data != null
? bigquery.jobs().insert(getProjectId(), job, data).execute()
: bigquery.jobs().insert(getProjectId(), job).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Synchronously waits for a job to complete that's already been launched.
*
* @throws BigqueryJobFailureException
*/
private Job waitForJob(Job job) {
verify(job.getStatus() != null);
while (!job.getStatus().getState().equals("DONE")) {
sleeper.sleepUninterruptibly(pollInterval);
JobReference ref = job.getJobReference();
try {
job = bigquery.jobs().get(ref.getProjectId(), ref.getJobId()).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
return job;
}
/**
* Checks completed job for errors.
*
* @throws BigqueryJobFailureException
*/
private static Job checkJob(Job job) {
verify(job.getStatus() != null);
JobStatus jobStatus = job.getStatus();
if (jobStatus.getErrorResult() != null) {
throw BigqueryJobFailureException.create(jobStatus);
} else {
logger.info(summarizeCompletedJob(job));
if (jobStatus.getErrors() != null) {
for (ErrorProto error : jobStatus.getErrors()) {
logger.warning(String.format("%s: %s", error.getReason(), error.getMessage()));
}
}
return job;
}
}
/** Returns a summary of a completed job's statistics for logging. */
private static String summarizeCompletedJob(Job job) {
JobStatistics stats = job.getStatistics();
return String.format(
"Job took %,.3f seconds after a %,.3f second delay and processed %,d bytes (%s)",
(stats.getEndTime() - stats.getStartTime()) / 1000.0,
(stats.getStartTime() - stats.getCreationTime()) / 1000.0,
stats.getTotalBytesProcessed(),
toJobReferenceString(job.getJobReference()));
}
private <T> ListenableFuture<T> runJobToCompletion(Job job, T result) {
return runJobToCompletion(job, result, null);
}
/** Runs job and returns a future that yields {@code result} when {@code job} is completed. */
private <T> ListenableFuture<T> runJobToCompletion(
final Job job,
final T result,
@Nullable final AbstractInputStreamContent data) {
return service.submit(new Callable<T>() {
@Override
public T call() {
runJob(job, data);
return result;
}});
}
private ListenableFuture<Job> runJobToCompletion(final Job job) {
return service.submit(new Callable<Job>() {
@Override
public Job call() {
return runJob(job, null);
}});
}
/** Helper that returns true if a dataset with this name exists. */
public boolean checkDatasetExists(String datasetName) throws IOException {
try {
bigquery.datasets().get(getProjectId(), datasetName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Helper that returns true if a table with this name and dataset name exists. */
public boolean checkTableExists(String datasetName, String tableName) throws IOException {
try {
bigquery.tables().get(getProjectId(), datasetName, tableName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Returns the projectId set by the environment, or {@code null} if none is set. */
public static String getEnvironmentProjectId() {
return RegistryEnvironment.get().config().getProjectId();
}
/** Returns the projectId associated with this bigquery connection. */
public String getProjectId() {
return getEnvironmentProjectId();
}
/** Returns the dataset name that this bigquery connection uses by default. */
public String getDatasetId() {
return datasetId;
}
/** Returns dataset reference that can be used to avoid having to specify dataset in SQL code. */
public DatasetReference getDataset() {
return new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId());
}
/** Returns table reference with the projectId and datasetId filled out for you. */
public TableReference getTable(String tableName) {
return new TableReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId())
.setTableId(tableName);
}
/**
* Helper that creates a dataset with this name if it doesn't already exist, and returns true
* if creation took place.
*/
public boolean createDatasetIfNeeded(String datasetName) throws IOException {
if (!checkDatasetExists(datasetName)) {
bigquery.datasets()
.insert(getProjectId(), new Dataset().setDatasetReference(new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(datasetName)))
.execute();
System.err.printf("Created dataset: %s:%s\n", getProjectId(), datasetName);
return true;
}
return false;
}
/** Create a table from a SQL query if it doesn't already exist. */
public TableReference ensureTable(TableReference table, String sqlQuery) {
try {
runJob(new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(sqlQuery)
.setDefaultDataset(getDataset())
.setDestinationTable(table))));
} catch (BigqueryJobFailureException e) {
if (e.getReason().equals("duplicate")) {
// Table already exists.
} else {
throw e;
}
}
return table;
}
}


@ -1,167 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Sets.newConcurrentHashSet;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import com.google.api.client.extensions.appengine.http.UrlFetchTransport;
import com.google.api.client.googleapis.extensions.appengine.auth.oauth2.AppIdentityCredential;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.BigqueryScopes;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.util.FormattingLogger;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
/** Factory for creating {@link Bigquery} connections. */
public class BigqueryFactory {
private static final FormattingLogger logger = getLoggerForCallerClass();
// Cross-request caches to avoid unnecessary RPCs.
private static Set<String> knownExistingDatasets = newConcurrentHashSet();
private static Set<String> knownExistingTables = newConcurrentHashSet();
@Inject Map<String, ImmutableList<TableFieldSchema>> bigquerySchemas;
@Inject Subfactory subfactory;
@Inject BigqueryFactory() {}
/** This class is broken out solely so that it can be mocked inside of tests. */
static class Subfactory {
@Inject Subfactory() {}
public Bigquery create(
String applicationName,
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer) {
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
.setApplicationName(applicationName)
.build();
}
}
/** Returns a new connection to BigQuery. */
public Bigquery create(
String applicationName,
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer) {
return subfactory.create(applicationName, transport, jsonFactory, httpRequestInitializer);
}
/**
* Returns a new connection to Bigquery, first ensuring that the given dataset exists in the
* project with the given id, creating it if required.
*/
public Bigquery create(String projectId, String datasetId) throws IOException {
Bigquery bigquery = create(
getClass().getSimpleName(),
new UrlFetchTransport(),
new JacksonFactory(),
new AppIdentityCredential(BigqueryScopes.all()));
// Note: it's safe for multiple threads to call this as the dataset will only be created once.
if (!knownExistingDatasets.contains(datasetId)) {
ensureDataset(bigquery, projectId, datasetId);
knownExistingDatasets.add(datasetId);
}
return bigquery;
}
/**
* Returns a new connection to Bigquery, first ensuring that the given dataset and table exist in
* project with the given id, creating them if required.
*/
public Bigquery create(String projectId, String datasetId, String tableId)
throws IOException {
Bigquery bigquery = create(projectId, datasetId);
checkArgument(bigquerySchemas.containsKey(tableId), "Unknown table ID: %s", tableId);
if (!knownExistingTables.contains(tableId)) {
ensureTable(
bigquery,
new TableReference()
.setDatasetId(datasetId)
.setProjectId(projectId)
.setTableId(tableId),
bigquerySchemas.get(tableId));
knownExistingTables.add(tableId);
}
return bigquery;
}
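// Illustrative usage (project, dataset, and table IDs hypothetical): obtain a
// connection whose "activity" table is guaranteed to exist with the injected schema.
//
//   Bigquery bigquery = bigqueryFactory.create("my-project", "reporting", "activity");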
/**
* Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper
* to check for dataset existence than it is to try to create it and check for exceptions.
*/
// Note that these are not static so they can be mocked for testing.
private void ensureDataset(Bigquery bigquery, String projectId, String datasetId)
throws IOException {
try {
bigquery.datasets()
.insert(projectId,
new Dataset().setDatasetReference(
new DatasetReference()
.setProjectId(projectId)
.setDatasetId(datasetId)))
.execute();
} catch (IOException e) {
// Swallow errors about a duplicate dataset, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
/** Ensures the table exists in Bigquery. */
private void ensureTable(Bigquery bigquery, TableReference table, List<TableFieldSchema> schema)
throws IOException {
try {
bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table()
.setSchema(new TableSchema().setFields(schema))
.setTableReference(table))
.execute();
logger.infofmt("Created BigQuery table %s:%s.%s", table.getProjectId(), table.getDatasetId(),
table.getTableId());
} catch (IOException e) {
// Swallow errors about a table that exists, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
}


@ -1,120 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.api.client.googleapis.json.GoogleJsonError;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.common.collect.Iterables;
import java.io.IOException;
import javax.annotation.Nullable;
/** Generic exception to throw if a Bigquery job fails. */
public final class BigqueryJobFailureException extends RuntimeException {
/** Delegate {@link IOException} errors, checking for {@link GoogleJsonResponseException}. */
public static BigqueryJobFailureException create(IOException cause) {
if (cause instanceof GoogleJsonResponseException) {
return create(((GoogleJsonResponseException) cause).getDetails());
} else {
return new BigqueryJobFailureException(cause.getMessage(), cause, null, null);
}
}
/** Create an error for JSON server response errors. */
public static BigqueryJobFailureException create(GoogleJsonError error) {
return new BigqueryJobFailureException(error.getMessage(), null, null, error);
}
/** Create an error from a failed job. */
public static BigqueryJobFailureException create(JobStatus jobStatus) {
checkArgument(jobStatus.getErrorResult() != null, "this job didn't fail!");
return new BigqueryJobFailureException(
describeError(jobStatus.getErrorResult()), null, jobStatus, null);
}
@Nullable
private final JobStatus jobStatus;
@Nullable
private final GoogleJsonError jsonError;
private BigqueryJobFailureException(
String message,
@Nullable Throwable cause,
@Nullable JobStatus jobStatus,
@Nullable GoogleJsonError jsonError) {
super(message, cause);
this.jobStatus = jobStatus;
this.jsonError = jsonError;
}
/**
* Returns a short error code describing why this job failed.
*
* <h3>Sample Reasons</h3>
*
* <ul>
* <li>{@code "duplicate"}: The table you're trying to create already exists.
* <li>{@code "invalidQuery"}: Query syntax error of some sort.
* <li>{@code "unknown"}: Non-Bigquery errors.
* </ul>
*
* @see "https://cloud.google.com/bigquery/troubleshooting-errors"
*/
public String getReason() {
if (jobStatus != null) {
return jobStatus.getErrorResult().getReason();
} else if (jsonError != null) {
return Iterables.getLast(jsonError.getErrors()).getReason();
} else {
return "unknown";
}
}
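// Illustrative handling sketch (the surrounding call is hypothetical), mirroring how
// callers such as BigqueryConnection#ensureTable treat benign duplicates:
//
//   try {
//     runSomeBigqueryJob();
//   } catch (BigqueryJobFailureException e) {
//     if (!e.getReason().equals("duplicate")) {
//       throw e;
//     }
//   }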
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(String.format("%s: %s", getClass().getSimpleName(), getMessage()));
try {
if (jobStatus != null) {
for (ErrorProto error : jobStatus.getErrors()) {
result.append("\n---------------------------------- BEGIN DEBUG INFO\n");
result.append(describeError(error));
result.append('\n');
result.append(error.getDebugInfo());
result.append("\n---------------------------------- END DEBUG INFO");
}
}
if (jsonError != null) {
String extraInfo = jsonError.toPrettyString();
result.append('\n');
result.append(extraInfo);
}
} catch (IOException e) {
result.append(e);
}
return result.toString();
}
private static String describeError(ErrorProto error) {
return String.format("%s: %s", error.getReason(), error.getMessage());
}
}


@ -1,72 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import static dagger.Provides.Type.SET_VALUES;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.BigqueryScopes;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.common.collect.ImmutableList;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.request.OAuthScopes;
import dagger.Module;
import dagger.Multibindings;
import dagger.Provides;
import java.util.Map;
import java.util.Set;
/**
* Dagger module for Google {@link Bigquery} connection objects.
*
* @see com.google.domain.registry.config.ConfigModule
* @see com.google.domain.registry.request.Modules.UrlFetchTransportModule
* @see com.google.domain.registry.request.Modules.Jackson2Module
* @see com.google.domain.registry.request.Modules.AppIdentityCredentialModule
* @see com.google.domain.registry.request.Modules.UseAppIdentityCredentialForGoogleApisModule
*/
@Module
public final class BigqueryModule {
@Multibindings
interface BigQueryMultibindings {
/** Provides a map of BigQuery table names to their schema fields. */
Map<String, ImmutableList<TableFieldSchema>> bigquerySchemas();
}
/** Provides OAuth2 scopes for the Bigquery service needed by Domain Registry. */
@Provides(type = SET_VALUES)
@OAuthScopes
static Set<String> provideBigqueryOAuthScopes() {
return BigqueryScopes.all();
}
@Provides
static Bigquery provideBigquery(
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer,
@Config("projectId") String projectId) {
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
.setApplicationName(projectId)
.build();
}
}


@ -1,169 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.bigquery;
import com.google.api.services.bigquery.model.JobReference;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.DateTimeFormatterBuilder;
import org.joda.time.format.DateTimeParser;
import org.joda.time.format.ISODateTimeFormat;
import java.util.concurrent.TimeUnit;
/** Utilities related to Bigquery. */
public class BigqueryUtils {
/** Bigquery modes for schema fields. */
public enum FieldMode {
NULLABLE,
REQUIRED,
REPEATED;
/** Return the name of the field mode as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Bigquery schema field types. */
public enum FieldType {
STRING,
INTEGER,
FLOAT,
TIMESTAMP,
RECORD,
BOOLEAN;
/** Return the name of the field type as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Source formats for Bigquery load jobs. */
public enum SourceFormat {
CSV,
NEWLINE_DELIMITED_JSON,
DATASTORE_BACKUP
}
/** Destination formats for Bigquery extract jobs. */
public enum DestinationFormat {
CSV,
NEWLINE_DELIMITED_JSON
}
/** Bigquery table types (i.e. regular table or view). */
public enum TableType {
TABLE,
VIEW
}
/**
* Bigquery write dispositions (i.e. what to do about writing to an existing table).
*
* @see <a href="https://developers.google.com/bigquery/docs/reference/v2/jobs">API docs</a>
*/
public enum WriteDisposition {
/** Only write to the table if there is no existing table or if it is empty. */
WRITE_EMPTY,
/** If the table already exists, overwrite it with the new data. */
WRITE_TRUNCATE,
/** If the table already exists, append the data to the table. */
WRITE_APPEND
}
/**
* A {@code DateTimeFormatter} that defines how to print DateTimes in a string format that
* BigQuery can interpret and how to parse the string formats that BigQuery emits into DateTimes.
* <p>
* The general format definition is "YYYY-MM-DD HH:MM:SS.SSS[ ZZ]", where the fractional seconds
* portion can have 0-6 decimal places (although we restrict it to 0-3 here since Joda DateTime
* only supports up to millisecond precision) and the zone if not specified defaults to UTC.
* <p>
* Although we expect a zone specification of "UTC" when parsing, we don't emit it when printing
* because in some cases BigQuery does not allow any time zone specification for input timestamp
* strings (it instead assumes UTC for whatever input you provide; see b/16380363).
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static final DateTimeFormatter BIGQUERY_TIMESTAMP_FORMAT = new DateTimeFormatterBuilder()
.append(ISODateTimeFormat.date())
.appendLiteral(' ')
.append(
// For printing, always print out the milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getPrinter(),
// For parsing, we need a series of parsers to correctly handle the milliseconds.
new DateTimeParser[] {
// Try to parse the time with milliseconds first, which requires at least one
// fractional second digit, and if that fails try to parse without milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getParser(),
ISODateTimeFormat.hourMinuteSecond().getParser()})
// Print UTC as the empty string since BigQuery's TIMESTAMP() function does not accept any
// time zone specification, but require "UTC" on parsing. Since we force this formatter to
// always use UTC below, the other arguments do not matter.
//
// TODO(b/26162667): replace this with appendLiteral(" UTC") if b/16380363 gets resolved.
.appendTimeZoneOffset("", " UTC", false, 1, 1)
.toFormatter()
.withZoneUTC();
/**
* Returns the human-readable string version of the given DateTime, suitable for conversion
* within BigQuery from a string literal into a BigQuery timestamp type.
*/
public static String toBigqueryTimestampString(DateTime dateTime) {
return BIGQUERY_TIMESTAMP_FORMAT.print(dateTime);
}
/** Returns the DateTime for a given human-readable string-formatted BigQuery timestamp. */
public static DateTime fromBigqueryTimestampString(String timestampString) {
return BIGQUERY_TIMESTAMP_FORMAT.parseDateTime(timestampString);
}
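// Illustrative round-trip under BIGQUERY_TIMESTAMP_FORMAT (values hypothetical):
//
//   toBigqueryTimestampString(new DateTime(2016, 5, 13, 18, 55, 8, 123, DateTimeZone.UTC))
//       returns "2016-05-13 18:55:08.123" (no zone suffix is printed), while
//   fromBigqueryTimestampString("2016-05-13 18:55:08.123 UTC")
//       returns the equivalent DateTime; the " UTC" suffix is required when parsing.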
/**
* Converts a time (in TimeUnits since the epoch) into a numeric string that BigQuery understands
* as a timestamp: the decimal number of seconds since the epoch, precise up to microseconds.
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static String toBigqueryTimestamp(long timestamp, TimeUnit unit) {
long seconds = unit.toSeconds(timestamp);
long fractionalSeconds = unit.toMicros(timestamp) % 1000000;
return String.format("%d.%06d", seconds, fractionalSeconds);
}
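// Worked example: 1458000000123 ms since the epoch is 1458000000 whole seconds plus
// 123000 leftover microseconds, so
//   toBigqueryTimestamp(1458000000123L, TimeUnit.MILLISECONDS) returns "1458000000.123000".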
/**
* Converts a {@link DateTime} into a numeric string that BigQuery understands as a timestamp:
* the decimal number of seconds since the epoch, precise up to microseconds.
*
* <p>Note that since {@code DateTime} only stores milliseconds, the last 3 digits will be zero.
*
* @see "https://developers.google.com/bigquery/timestamp"
*/
public static String toBigqueryTimestamp(DateTime dateTime) {
return toBigqueryTimestamp(dateTime.getMillis(), TimeUnit.MILLISECONDS);
}
/**
* Returns the canonical string format for a JobReference object (the project ID and then job ID,
* delimited by a single colon) since JobReference.toString() is not customized to return it.
*/
public static String toJobReferenceString(JobReference jobRef) {
return jobRef.getProjectId() + ":" + jobRef.getJobId();
}
}


@ -1,16 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.bigquery;


@ -1,18 +0,0 @@
package(default_visibility = ["//java/com/google/domain/registry:registry_project"])
java_library(
name = "braintree",
srcs = glob(["*.java"]),
visibility = ["//visibility:public"],
deps = [
"//java/com/google/common/base",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/keyring/api",
"//java/com/google/domain/registry/model",
"//third_party/java/braintree",
"//third_party/java/dagger",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)


@ -1,47 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.braintree;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.keyring.api.KeyModule.Key;
import com.braintreegateway.BraintreeGateway;
import dagger.Module;
import dagger.Provides;
import javax.inject.Singleton;
/** Dagger module for Braintree Payments API. */
@Module
public final class BraintreeModule {
@Provides
@Singleton
static BraintreeGateway provideBraintreeGateway(
RegistryEnvironment environment,
@Config("braintreeMerchantId") String merchantId,
@Config("braintreePublicKey") String publicKey,
@Key("braintreePrivateKey") String privateKey) {
return new BraintreeGateway(
environment == RegistryEnvironment.PRODUCTION
? com.braintreegateway.Environment.PRODUCTION
: com.braintreegateway.Environment.SANDBOX,
merchantId,
publicKey,
privateKey);
}
}


@ -1,105 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.braintree;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import com.google.common.base.Optional;
import com.google.common.base.VerifyException;
import com.google.domain.registry.model.registrar.Registrar;
import com.google.domain.registry.model.registrar.RegistrarContact;
import com.braintreegateway.BraintreeGateway;
import com.braintreegateway.Customer;
import com.braintreegateway.CustomerRequest;
import com.braintreegateway.Result;
import com.braintreegateway.exceptions.NotFoundException;
import javax.inject.Inject;
/** Helper for creating Braintree customer entries for registrars. */
public class BraintreeRegistrarSyncer {
private final BraintreeGateway braintree;
@Inject
BraintreeRegistrarSyncer(BraintreeGateway braintreeGateway) {
this.braintree = braintreeGateway;
}
/**
* Syncs {@code registrar} with Braintree customer entry, creating it if one doesn't exist.
*
* <p>The customer ID will be the same as {@link Registrar#getClientIdentifier()}.
*
* <p>Creating a customer object in Braintree's database is a necessary step in order to associate
* a payment with a registrar. The transaction will fail if the customer object doesn't exist.
*
* @throws IllegalArgumentException if {@code registrar} is not using BRAINTREE billing
* @throws VerifyException if the Braintree API returned a failure response
*/
public void sync(Registrar registrar) throws VerifyException {
String id = registrar.getClientIdentifier();
checkArgument(registrar.getBillingMethod() == Registrar.BillingMethod.BRAINTREE,
"Registrar (%s) billing method (%s) is not BRAINTREE", id, registrar.getBillingMethod());
CustomerRequest request = createRequest(registrar);
Result<Customer> result;
if (doesCustomerExist(id)) {
result = braintree.customer().update(id, request);
} else {
result = braintree.customer().create(request);
}
verify(result.isSuccess(),
"Failed to sync registrar (%s) to braintree customer: %s", id, result.getMessage());
}
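// Illustrative call site (a sketch): sync before charging so the Braintree
// transaction can reference the customer by its client identifier.
//
//   syncer.sync(registrar);  // registrar.getBillingMethod() must be BRAINTREE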
private CustomerRequest createRequest(Registrar registrar) {
CustomerRequest result =
new CustomerRequest()
.id(registrar.getClientIdentifier())
.customerId(registrar.getClientIdentifier())
.company(registrar.getRegistrarName());
Optional<RegistrarContact> contact = getBillingContact(registrar);
if (contact.isPresent()) {
result.email(contact.get().getEmailAddress());
result.phone(contact.get().getPhoneNumber());
result.fax(contact.get().getFaxNumber());
} else {
result.email(registrar.getEmailAddress());
result.phone(registrar.getPhoneNumber());
result.fax(registrar.getFaxNumber());
}
return result;
}
private Optional<RegistrarContact> getBillingContact(Registrar registrar) {
for (RegistrarContact contact : registrar.getContacts()) {
if (contact.getTypes().contains(RegistrarContact.Type.BILLING)) {
return Optional.of(contact);
}
}
return Optional.absent();
}
private boolean doesCustomerExist(String id) {
try {
braintree.customer().find(id);
return true;
} catch (NotFoundException e) {
return false;
}
}
}


@ -1,17 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/** Braintree payment gateway utilities. */
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.braintree;


@ -1,19 +0,0 @@
package(default_visibility = ["//java/com/google/domain/registry:registry_project"])
java_library(
name = "config",
srcs = glob(["*.java"]),
visibility = ["//visibility:public"],
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_money",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
],
)


@ -1,570 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import static com.google.domain.registry.config.ConfigUtils.makeUrl;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import dagger.Module;
import dagger.Provides;
import org.joda.money.CurrencyUnit;
import org.joda.time.DateTimeConstants;
import org.joda.time.Duration;
import java.lang.annotation.Documented;
import java.net.URI;
import java.net.URL;
import javax.inject.Qualifier;
/** Dagger module for injecting configuration settings. */
@Module
public final class ConfigModule {
/** Dagger qualifier for configuration settings. */
@Qualifier
@Documented
public static @interface Config {
String value() default "";
}
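// Illustrative injection site (consumer class hypothetical): any provider below can
// be requested by its qualifier, e.g.
//
//   @Inject @Config("projectId") String projectId;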
private static final RegistryEnvironment registryEnvironment = RegistryEnvironment.get();
@Provides
public static RegistryEnvironment provideRegistryEnvironment() {
return registryEnvironment;
}
@Provides
public static RegistryConfig provideConfig(RegistryEnvironment environment) {
return environment.config();
}
@Provides
@Config("projectId")
public static String provideProjectId(RegistryConfig config) {
return config.getProjectId();
}
/** @see RegistryConfig#getZoneFilesBucket() */
@Provides
@Config("zoneFilesBucket")
public static String provideZoneFilesBucket(RegistryConfig config) {
return config.getZoneFilesBucket();
}
/** @see RegistryConfig#getCommitsBucket() */
@Provides
@Config("commitLogGcsBucket")
public static String provideCommitLogGcsBucket(RegistryConfig config) {
return config.getCommitsBucket();
}
/** @see RegistryConfig#getCommitLogDatastoreRetention() */
@Provides
@Config("commitLogDatastoreRetention")
public static Duration provideCommitLogDatastoreRetention(RegistryConfig config) {
return config.getCommitLogDatastoreRetention();
}
@Provides
@Config("domainListsGcsBucket")
public static String provideDomainListsGcsBucket(RegistryConfig config) {
return config.getDomainListsBucket();
}
/**
* Maximum number of commit logs to delete per transaction.
*
* <p>If we assume that the average key size is 256 bytes and that each manifest has six
* mutations, we can do about 5,000 deletes in a single transaction before hitting the 10 MB limit.
* Therefore 500 should be a safe number, since it's an order of magnitude less space than we
* need.
*
* <p>Transactions also have a four minute time limit. Since we have to perform N subqueries to
* fetch mutation keys, 500 would be a safe number if those queries were performed in serial,
* since each query would have about 500ms to complete, which is an order of magnitude more time
* than we need. However, this does not apply, since the subqueries are performed asynchronously.
*
* @see com.google.domain.registry.backup.DeleteOldCommitLogsAction
*/
@Provides
@Config("commitLogMaxDeletes")
public static int provideCommitLogMaxDeletes() {
return 500;
}
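// Back-of-the-envelope check for the figure above (assuming each delete touches the
// manifest key plus its six mutation keys): 10 MB / (256 B * 7) is roughly 5,800
// deletes per transaction, so 500 leaves an order-of-magnitude safety margin.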
/**
* Batch size for the number of transactions' worth of commit log data to process at once when
* exporting a commit log diff.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
@Provides
@Config("commitLogDiffExportBatchSize")
public static int provideCommitLogDiffExportBatchSize() {
return 100;
}
/**
* Returns the Google Cloud Storage bucket for staging BRDA escrow deposits.
*
* @see com.google.domain.registry.rde.PendingDepositChecker
*/
@Provides
@Config("brdaBucket")
public static String provideBrdaBucket(@Config("projectId") String projectId) {
return projectId + "-icann-brda";
}
/** @see com.google.domain.registry.rde.BrdaCopyAction */
@Provides
@Config("brdaDayOfWeek")
public static int provideBrdaDayOfWeek() {
return DateTimeConstants.TUESDAY;
}
/** Amount of time between BRDA deposits. */
@Provides
@Config("brdaInterval")
public static Duration provideBrdaInterval() {
return Duration.standardDays(7);
}
/** Maximum amount of time generating a BRDA deposit for a TLD could take, before killing. */
@Provides
@Config("brdaLockTimeout")
public static Duration provideBrdaLockTimeout() {
return Duration.standardHours(5);
}
/** Returns {@code true} if the target zone should be created in DNS if it does not exist. */
@Provides
@Config("dnsCreateZone")
public static boolean provideDnsCreateZone(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return false;
default:
return true;
}
}
/**
* The maximum number of domain and host updates to batch together to send to
* PublishDnsUpdatesAction, to avoid exceeding AppEngine's limits.
*/
@Provides
@Config("dnsTldUpdateBatchSize")
public static int provideDnsTldUpdateBatchSize() {
return 100;
}
/** The maximum interval (seconds) to lease tasks from the dns-pull queue. */
@Provides
@Config("dnsWriteLockTimeout")
public static Duration provideDnsWriteLockTimeout() {
// Optimally, we would set this to a little less than the length of the DNS refresh cycle, since
// otherwise, a new PublishDnsUpdatesAction could get kicked off before the current one has
// finished, which will try and fail to acquire the lock. However, it is more important that it
// be greater than the DNS write timeout, so that if that timeout occurs, it will be cleaned up
// gracefully, rather than having the lock time out. So we have to live with the possible lock
// failures.
return Duration.standardSeconds(75);
}
/** Returns the default time to live for DNS records. */
@Provides
@Config("dnsDefaultTtl")
public static Duration provideDnsDefaultTtl() {
return Duration.standardSeconds(180);
}
/**
* Number of sharded entity group roots used for performing strongly consistent scans.
*
* <p><b>Warning:</b> This number may increase but never decrease.
*
* @see com.google.domain.registry.model.index.EppResourceIndex
*/
@Provides
@Config("eppResourceIndexBucketCount")
public static int provideEppResourceIndexBucketCount(RegistryConfig config) {
return config.getEppResourceIndexBucketCount();
}
/**
* Returns size of Google Cloud Storage client connection buffer in bytes.
*
* @see com.google.domain.registry.gcs.GcsUtils
*/
@Provides
@Config("gcsBufferSize")
public static int provideGcsBufferSize() {
return 1024 * 1024;
}
/**
* Gets the email address of the admin account for the Google App.
*
* @see com.google.domain.registry.groups.DirectoryGroupsConnection
*/
@Provides
@Config("googleAppsAdminEmailAddress")
public static String provideGoogleAppsAdminEmailAddress(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "admin@registry.google";
default:
return "admin@domainregistry-sandbox.co";
}
}
/**
* Returns the publicly accessible domain name for the running Google Apps instance.
*
* @see com.google.domain.registry.export.SyncGroupMembersAction
* @see com.google.domain.registry.tools.server.CreateGroupsAction
*/
@Provides
@Config("publicDomainName")
public static String providePublicDomainName(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "registry.google";
default:
return "domainregistry-sandbox.co";
}
}
@Provides
@Config("tmchCaTestingMode")
public static boolean provideTmchCaTestingMode(RegistryConfig config) {
return config.getTmchCaTestingMode();
}
/**
* ICANN TMCH Certificate Revocation List URL.
*
* <p>This file needs to be downloaded at least once a day and verified to make sure it was
* signed by {@code icann-tmch.crt}.
*
* @see com.google.domain.registry.tmch.TmchCrlAction
* @see "http://tools.ietf.org/html/draft-lozano-tmch-func-spec-08#section-5.2.3.2"
*/
@Provides
@Config("tmchCrlUrl")
public static URL provideTmchCrlUrl(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return makeUrl("http://crl.icann.org/tmch.crl");
default:
return makeUrl("http://crl.icann.org/tmch_pilot.crl");
}
}
@Provides
@Config("tmchMarksdbUrl")
public static String provideTmchMarksdbUrl(RegistryConfig config) {
return config.getTmchMarksdbUrl();
}
/**
* Returns the Google Cloud Storage bucket for staging escrow deposits pending upload.
*
* @see com.google.domain.registry.rde.RdeStagingAction
*/
@Provides
@Config("rdeBucket")
public static String provideRdeBucket(@Config("projectId") String projectId) {
return projectId + "-rde";
}
/**
* Size of Ghostryde buffer in bytes for each layer in the pipeline.
*
* @see com.google.domain.registry.rde.Ghostryde
*/
@Provides
@Config("rdeGhostrydeBufferSize")
public static Integer provideRdeGhostrydeBufferSize() {
return 64 * 1024;
}
/** Amount of time between RDE deposits. */
@Provides
@Config("rdeInterval")
public static Duration provideRdeInterval() {
return Duration.standardDays(1);
}
/** Maximum amount of time for sending a small XML file to ICANN via HTTP, before killing. */
@Provides
@Config("rdeReportLockTimeout")
public static Duration provideRdeReportLockTimeout() {
return Duration.standardSeconds(60);
}
/**
* URL of ICANN's HTTPS server to which the RDE report should be {@code PUT}.
*
* <p>You must append {@code "/TLD/ID"} to this URL.
*
* @see com.google.domain.registry.rde.RdeReportAction
*/
@Provides
@Config("rdeReportUrlPrefix")
public static String provideRdeReportUrlPrefix(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "https://ry-api.icann.org/report/registry-escrow-report";
default:
return "https://test-ry-api.icann.org:8543/report/registry-escrow-report";
}
}
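// Illustrative sketch of building a full report URL from this prefix; the TLD
// "example" and report ID "20160101" are hypothetical placeholders.
//
//   String prefix = provideRdeReportUrlPrefix(RegistryEnvironment.PRODUCTION);
//   URL reportUrl = makeUrl(prefix + "/example/20160101"); // appends "/TLD/ID"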
/**
* Size of RYDE generator buffer in bytes for each of the five layers.
*
* @see com.google.domain.registry.rde.RydePgpCompressionOutputStream
* @see com.google.domain.registry.rde.RydePgpEncryptionOutputStream
* @see com.google.domain.registry.rde.RydePgpFileOutputStream
* @see com.google.domain.registry.rde.RydePgpSigningOutputStream
* @see com.google.domain.registry.rde.RydeTarOutputStream
*/
@Provides
@Config("rdeRydeBufferSize")
public static Integer provideRdeRydeBufferSize() {
return 64 * 1024;
}
/** Maximum amount of time generating an escrow deposit for a TLD could take, before killing. */
@Provides
@Config("rdeStagingLockTimeout")
public static Duration provideRdeStagingLockTimeout() {
return Duration.standardHours(5);
}
/** Maximum amount of time it should ever take to upload an escrow deposit, before killing. */
@Provides
@Config("rdeUploadLockTimeout")
public static Duration provideRdeUploadLockTimeout() {
return Duration.standardMinutes(30);
}
/**
* Minimum amount of time to wait between consecutive SFTP uploads on a single TLD.
*
* <p>This value was communicated to us by the escrow provider.
*/
@Provides
@Config("rdeUploadSftpCooldown")
public static Duration provideRdeUploadSftpCooldown() {
return Duration.standardHours(2);
}
/**
* Returns SFTP URL containing a username, hostname, port (optional), and directory (optional) to
* which cloud storage files are uploaded. The password should not be included, as it's better to
* use public key authentication.
*
* @see com.google.domain.registry.rde.RdeUploadAction
*/
@Provides
@Config("rdeUploadUrl")
public static URI provideRdeUploadUrl(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return URI.create("sftp://GoogleTLD@sftpipm2.ironmountain.com/Outbox");
default:
return URI.create("sftp://google@ppftpipm.ironmountain.com/Outbox");
}
}
@Provides
@Config("registrarConsoleEnabled")
public static boolean provideRegistrarConsoleEnabled() {
return true;
}
/** Maximum amount of time for syncing a spreadsheet, before killing. */
@Provides
@Config("sheetLockTimeout")
public static Duration provideSheetLockTimeout() {
return Duration.standardHours(1);
}
/**
* Returns ID of Google Spreadsheet to which Registrar entities should be synced.
*
* <p>This ID, as you'd expect, comes from the URL of the spreadsheet.
*
* @see com.google.domain.registry.export.sheet.SyncRegistrarsSheetAction
*/
@Provides
@Config("sheetRegistrarId")
public static Optional<String> provideSheetRegistrarId(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return Optional.of("1n2Gflqsgo9iDXcdt9VEskOVySZ8qIhQHJgjqsleCKdE");
case ALPHA:
case CRASH:
return Optional.of("16BwRt6v11Iw-HujCbAkmMxqw3sUG13B8lmXLo-uJTsE");
case SANDBOX:
return Optional.of("1TlR_UMCtfpkxT9oUEoF5JEbIvdWNkLRuURltFkJ_7_8");
case QA:
return Optional.of("1RoY1XZhLLwqBkrz0WbEtaT9CU6c8nUAXfId5BtM837o");
default:
return Optional.absent();
}
}
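// Illustrative note: for a spreadsheet at the hypothetical URL
// https://docs.google.com/spreadsheets/d/<id>/edit, the value returned above is
// the <id> path segment between "/d/" and "/edit".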
/** Amount of time between synchronizations of the Registrar spreadsheet. */
@Provides
@Config("sheetRegistrarInterval")
public static Duration provideSheetRegistrarInterval() {
return Duration.standardHours(1);
}
/**
* Returns SSH client connection and read timeout.
*
* @see com.google.domain.registry.rde.RdeUploadAction
*/
@Provides
@Config("sshTimeout")
public static Duration provideSshTimeout() {
return Duration.standardSeconds(30);
}
/** Duration after watermark where we shouldn't deposit, because transactions might be pending. */
@Provides
@Config("transactionCooldown")
public static Duration provideTransactionCooldown() {
return Duration.standardMinutes(5);
}
/**
* Number of times to retry a GAE operation when {@code TransientFailureException} is thrown.
*
* <p>The number of milliseconds it'll sleep before giving up is {@code 2^n - 2}.
*
* @see com.google.domain.registry.util.TaskEnqueuer
*/
@Provides
@Config("transientFailureRetries")
public static int provideTransientFailureRetries() {
return 12; // Four seconds.
}
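// Illustrative arithmetic for the formula above, assuming the retrier sleeps
// 2^k milliseconds before the k-th retry, so the total sleep after n retries is
// 2 + 4 + ... + 2^(n-1) = 2^n - 2 milliseconds:
//
//   int n = 12;
//   long totalSleepMillis = (1L << n) - 2; // 4094 ms, i.e. roughly four seconds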
/**
* Amount of time public HTTP proxies are permitted to cache our WHOIS responses.
*
* @see com.google.domain.registry.whois.WhoisHttpServer
*/
@Provides
@Config("whoisHttpExpires")
public static Duration provideWhoisHttpExpires() {
return Duration.standardDays(1);
}
/**
* Maximum number of results to return for an RDAP search query.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapResultSetMaxSize")
public static int provideRdapResultSetMaxSize() {
return 100;
}
/**
* Base for RDAP link paths.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapLinkBase")
public static String provideRdapLinkBase() {
return "https://nic.google/rdap/";
}
/**
* WHOIS server displayed in RDAP query responses.
*
* @see com.google.domain.registry.rdap.RdapActionBase
*/
@Provides
@Config("rdapWhoisServer")
public static String provideRdapWhoisServer() {
return "whois.nic.google";
}
/** Returns Braintree Merchant Account IDs for each supported currency. */
@Provides
@Config("braintreeMerchantAccountIds")
public static ImmutableMap<CurrencyUnit, String> provideBraintreeMerchantAccountId(
RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return ImmutableMap.of(
CurrencyUnit.USD, "charlestonregistryUSD",
CurrencyUnit.JPY, "charlestonregistryJPY");
default:
return ImmutableMap.of(
CurrencyUnit.USD, "google",
CurrencyUnit.JPY, "google-jpy");
}
}
/**
* Returns Braintree Merchant ID of Registry, used for accessing Braintree API.
*
* <p>This is a base32 value copied from the Braintree website.
*/
@Provides
@Config("braintreeMerchantId")
public static String provideBraintreeMerchantId(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "6gm2mm48k9ty4zmx";
default:
// Valentine: Domain Registry Braintree Sandbox
return "vqgn8khkq2cs6y9s";
}
}
/**
* Returns Braintree Public Key of Registry, used for accessing Braintree API.
*
* <p>This is a base32 value copied from the Braintree website.
*
* @see com.google.domain.registry.keyring.api.Keyring#getBraintreePrivateKey()
*/
@Provides
@Config("braintreePublicKey")
public static String provideBraintreePublicKey(RegistryEnvironment environment) {
switch (environment) {
case PRODUCTION:
return "tzcfxggzgbh2jg5x";
default:
// Valentine: Domain Registry Braintree Sandbox
return "tzcyzvm3mn7zkdnx";
}
}
}

View file

@ -1,37 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import java.net.MalformedURLException;
import java.net.URL;
/** Helper methods for configuration classes. */
final class ConfigUtils {
/**
* Creates a URL instance.
*
* @throws RuntimeException to rethrow {@link MalformedURLException}
*/
static URL makeUrl(String url) {
try {
return new URL(url);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private ConfigUtils() {}
}
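// Illustrative usage of the helper above:
//
//   URL crl = ConfigUtils.makeUrl("http://crl.icann.org/tmch.crl"); // ok
//   ConfigUtils.makeUrl("not a url"); // throws RuntimeException wrapping MalformedURLException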

View file

@ -1,243 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.HostAndPort;
import org.joda.time.Duration;
import java.net.URL;
/**
* Domain Registry configuration.
*
* <p>The goal of this custom configuration system is to have our project environments configured
* in type-safe Java code that can be refactored, rather than in XML files and system properties.
*
* <p><b>Note:</b> This interface is deprecated by {@link ConfigModule}.
*/
public interface RegistryConfig {
/**
* Returns the App Engine project ID, which is based off the environment name.
*/
public String getProjectId();
/**
* Returns the Google Cloud Storage bucket for storing backup snapshots.
*
* @see com.google.domain.registry.export.ExportSnapshotServlet
*/
public String getSnapshotsBucket();
/**
* Returns the Google Cloud Storage bucket for storing exported domain lists.
*
* @see com.google.domain.registry.export.ExportDomainListsAction
*/
public String getDomainListsBucket();
/**
* Number of sharded commit log buckets.
*
* <p>This number is crucial for determining how much transactional throughput the system can
* allow, because it determines how many entity groups are available for writing commit logs.
* Since entity groups have a one transaction per second SLA (which is actually like ten in
* practice), a registry that wants to be able to handle one hundred transactions per second
* should have one hundred buckets.
*
* <p><b>Warning:</b> This can be raised but never lowered.
*
* @see com.google.domain.registry.model.ofy.CommitLogBucket
*/
public int getCommitLogBucketCount();
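// Illustrative sizing sketch for the rule above, using a hypothetical target:
//
//   int targetCommitsPerSecond = 100;
//   int bucketCount = targetCommitsPerSecond; // one entity group per transaction/second
//
// Since entity groups sustain closer to ten writes per second in practice, this
// sizing leaves ample headroom.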
/**
* Returns the length of time before commit logs should be deleted from datastore.
*
* <p>The only reason you'll want to retain commit logs in datastore is for performing
* point-in-time restoration queries for subsystems like RDE.
*
* @see com.google.domain.registry.backup.DeleteOldCommitLogsAction
* @see com.google.domain.registry.model.translators.CommitLogRevisionsTranslatorFactory
*/
public Duration getCommitLogDatastoreRetention();
/**
* Returns the Google Cloud Storage bucket for storing commit logs.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
public String getCommitsBucket();
/**
* Returns the Google Cloud Storage bucket for storing zone files.
*
* @see com.google.domain.registry.backup.ExportCommitLogDiffAction
*/
public String getZoneFilesBucket();
/**
* Returns {@code true} if TMCH certificate authority should be in testing mode.
*
* @see com.google.domain.registry.tmch.TmchCertificateAuthority
*/
public boolean getTmchCaTestingMode();
/**
* URL prefix for communicating with MarksDB ry interface.
*
* <p>This URL is used for DNL, SMDRL, and LORDN.
*
* @see com.google.domain.registry.tmch.Marksdb
* @see com.google.domain.registry.tmch.NordnUploadAction
*/
public String getTmchMarksdbUrl();
public Optional<String> getECatcherAddress();
/**
* Returns the address of the Domain Registry app HTTP server.
*
* <p>This is used by {@code registry_tool} to connect to the App Engine remote API.
*/
public HostAndPort getServer();
/** Returns the amount of time a singleton should be cached, before expiring. */
public Duration getSingletonCacheRefreshDuration();
/**
* Returns the amount of time a domain label list should be cached in memory before expiring.
*
* @see com.google.domain.registry.model.registry.label.ReservedList
* @see com.google.domain.registry.model.registry.label.PremiumList
*/
public Duration getDomainLabelListCacheDuration();
/** Returns the amount of time a singleton should be cached in persist mode, before expiring. */
public Duration getSingletonCachePersistDuration();
/**
* Returns the header text at the top of the reserved terms exported list.
*
* @see com.google.domain.registry.export.ExportUtils#exportReservedTerms
*/
public String getReservedTermsExportDisclaimer();
/**
* Returns a display name that is used on outgoing emails sent by Domain Registry.
*
* @see com.google.domain.registry.util.SendEmailUtils
*/
public String getGoogleAppsAdminEmailDisplayName();
/**
* Returns the email address that outgoing emails from the app are sent from.
*
* @see com.google.domain.registry.util.SendEmailUtils
*/
public String getGoogleAppsSendFromEmailAddress();
/**
* Returns the roid suffix to be used for the roids of all contacts and hosts. E.g. a value of
* "ROID" would end up creating roids that look like "ABC123-ROID".
*
* @see <a href="http://www.iana.org/assignments/epp-repository-ids/epp-repository-ids.xhtml">
* Extensible Provisioning Protocol (EPP) Repository Identifiers</a>
*/
public String getContactAndHostRepositoryIdentifier();
/**
* Returns the email address(es) that notifications of registrar and/or registrar contact updates
* should be sent to, or the empty list if updates should not be sent.
*
* @see com.google.domain.registry.ui.server.registrar.RegistrarServlet
*/
public ImmutableList<String> getRegistrarChangesNotificationEmailAddresses();
/**
* Returns default WHOIS server to use when {@code Registrar#getWhoisServer()} is {@code null}.
*
* @see "com.google.domain.registry.whois.DomainWhoisResponse"
* @see "com.google.domain.registry.whois.RegistrarWhoisResponse"
*/
public String getRegistrarDefaultWhoisServer();
/**
* Returns the default referral URL that is used unless registrars have specified otherwise.
*/
public URL getRegistrarDefaultReferralUrl();
/**
* Returns the title of the project used in generating documentation.
*/
public String getDocumentationProjectTitle();
/**
* Returns the maximum number of entities that can be checked at one time in an EPP check flow.
*/
public int getMaxChecks();
/**
* Returns the number of EppResourceIndex buckets to be used.
*/
public int getEppResourceIndexBucketCount();
/**
* Returns the base duration that gets doubled on each retry within {@code Ofy}.
*/
public Duration getBaseOfyRetryDuration();
/**
* Returns the global automatic transfer length for contacts. After this amount of time has
* elapsed, the transfer is automatically approved.
*/
public Duration getContactAutomaticTransferLength();
/**
* Returns the clientId of the registrar used by the {@code CheckApiServlet}.
*/
public String getCheckApiServletRegistrarClientId();
/**
* Returns the delay before executing async delete flow mapreduces.
*
* <p>This delay should be sufficiently longer than a transaction, to solve the following problem:
* <ul>
* <li>a domain mutation flow starts a transaction
* <li>the domain flow non-transactionally reads a resource and sees that it's not in
* PENDING_DELETE
* <li>the domain flow creates a new reference to this resource
* <li>a contact/host delete flow runs and marks the resource PENDING_DELETE and commits
* <li>the domain flow commits
* </ul>
*
* <p>Although we try not to add references to a PENDING_DELETE resource, strictly speaking that
* is ok as long as the mapreduce eventually sees the new reference (and therefore asynchronously
* fails the delete). Without this delay, the mapreduce might have started before the domain flow
* committed, and could potentially miss the reference.
*/
public Duration getAsyncDeleteFlowMapreduceDelay();
/**
* Returns the amount of time to back off following an async flow task failure.
*
* <p>This should be ~orders of magnitude larger than the rate on the queue, in order to prevent
* the logs from filling up with unnecessary failures.
*/
public Duration getAsyncFlowFailureBackoff();
}

View file

@ -1,90 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import com.google.common.annotations.VisibleForTesting;
import javax.annotation.Nullable;
/** Registry environments. */
public enum RegistryEnvironment {
/** Production environment. */
PRODUCTION,
/** Development environment. */
ALPHA,
/** Load/Backup/Restore Testing environment. */
CRASH,
/** Local machine environment. */
LOCAL,
/** Quality Assurance environment. */
QA,
/** Sandbox environment. */
SANDBOX,
/**
* Unit testing environment.
*
* <p>This is the default enum value. This is because it's non-trivial to configure the system
* property that specifies the environment in our unit tests.
*
* <p>Do not use this environment outside of unit tests.
*/
UNITTEST;
/** Returns environment configured by system property {@value #PROPERTY}. */
public static RegistryEnvironment get() {
return valueOf(System.getProperty(PROPERTY, UNITTEST.name()).toUpperCase());
}
/**
* Returns configuration for this Domain Registry environment.
*
* <p><b>WARNING:</b> Do not store this value to a static field, otherwise you won't be able to
* override it for testing. You should instead store the environment object to a static field.
*/
public RegistryConfig config() {
if (configOverride != null) {
return configOverride;
} else if (this == UNITTEST) {
return testingConfig;
} else {
return config;
}
}
/** Globally override registry configuration from within a unit test. */
@VisibleForTesting
public static void overrideConfigurationForTesting(@Nullable RegistryConfig newConfig) {
configOverride = newConfig;
}
@Nullable
private static RegistryConfig configOverride;
// TODO(b/19247780) Use true dependency injection for this. In the mean time, if you're not
// Google, you'll need to change this to include your own config class implementation at compile
// time.
private static final RegistryConfig testingConfig = new TestRegistryConfig();
private final RegistryConfig config = new TestRegistryConfig();
/** System property for configuring which environment we should use. */
public static final String PROPERTY = "com.google.domain.registry.environment";
}
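// Illustrative usage sketch, assuming the JVM was started with the system
// property -Dcom.google.domain.registry.environment=sandbox:
//
//   RegistryEnvironment environment = RegistryEnvironment.get(); // SANDBOX
//   RegistryConfig config = environment.config();
//
// and, from a unit test (where the environment defaults to UNITTEST):
//
//   RegistryEnvironment.overrideConfigurationForTesting(new TestRegistryConfig());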

View file

@ -1,181 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.config;
import static com.google.domain.registry.config.ConfigUtils.makeUrl;
import static org.joda.time.Duration.standardDays;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.HostAndPort;
import org.joda.time.Duration;
import java.net.URL;
/**
* An implementation of RegistryConfig for unit testing that contains suitable testing data.
*/
public class TestRegistryConfig implements RegistryConfig {
public TestRegistryConfig() {}
@Override
public String getProjectId() {
return "domain-registry";
}
@Override
public int getCommitLogBucketCount() {
return 1;
}
@Override
public Duration getCommitLogDatastoreRetention() {
return Duration.standardDays(30);
}
@Override
public String getSnapshotsBucket() {
return getProjectId() + "-snapshots";
}
@Override
public String getDomainListsBucket() {
return getProjectId() + "-domain-lists";
}
@Override
public String getCommitsBucket() {
return getProjectId() + "-commits";
}
@Override
public String getZoneFilesBucket() {
return getProjectId() + "-zonefiles";
}
@Override
public boolean getTmchCaTestingMode() {
return true;
}
@Override
public String getTmchMarksdbUrl() {
return "https://ry.marksdb.org";
}
@Override
public Optional<String> getECatcherAddress() {
throw new UnsupportedOperationException();
}
@Override
public HostAndPort getServer() {
throw new UnsupportedOperationException();
}
@Override
public Duration getSingletonCacheRefreshDuration() {
// All cache durations are set to zero so that unit tests can update and then retrieve data
// immediately without failure.
return Duration.ZERO;
}
@Override
public Duration getDomainLabelListCacheDuration() {
return Duration.ZERO;
}
@Override
public Duration getSingletonCachePersistDuration() {
return Duration.ZERO;
}
@Override
public String getReservedTermsExportDisclaimer() {
return "This is a disclaimer.\n";
}
@Override
public String getGoogleAppsAdminEmailDisplayName() {
return "Testing Domain Registry";
}
@Override
public String getGoogleAppsSendFromEmailAddress() {
return "noreply@testing.example";
}
@Override
public ImmutableList<String> getRegistrarChangesNotificationEmailAddresses() {
return ImmutableList.of("notification@test.example", "notification2@test.example");
}
@Override
public String getRegistrarDefaultWhoisServer() {
return "whois.nic.fakewhois.example";
}
@Override
public URL getRegistrarDefaultReferralUrl() {
return makeUrl("http://www.referral.example/path");
}
@Override
public String getDocumentationProjectTitle() {
return "Domain Registry";
}
@Override
public int getMaxChecks() {
return 50;
}
@Override
public int getEppResourceIndexBucketCount() {
return 2;
}
@Override
public Duration getBaseOfyRetryDuration() {
return Duration.ZERO;
}
@Override
public String getContactAndHostRepositoryIdentifier() {
return "ROID";
}
@Override
public Duration getContactAutomaticTransferLength() {
return standardDays(5);
}
@Override
public String getCheckApiServletRegistrarClientId() {
return "TheRegistrar";
}
@Override
public Duration getAsyncDeleteFlowMapreduceDelay() {
return Duration.standardSeconds(90);
}
@Override
public Duration getAsyncFlowFailureBackoff() {
return Duration.standardMinutes(10);
}
}

View file

@ -1,16 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.config;

View file

@ -1,25 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "cron",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/net",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -1,58 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.domain.registry.model.ofy.CommitLogBucket;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.TaskEnqueuer;
import java.util.Random;
import javax.inject.Inject;
/** Action for fanning out cron tasks for each commit log bucket. */
@Action(path = "/_dr/cron/commitLogFanout", automaticallyPrintOk = true)
public final class CommitLogFanoutAction implements Runnable {
public static final String BUCKET_PARAM = "bucket";
private static final Random random = new Random();
@Inject TaskEnqueuer taskEnqueuer;
@Inject @Parameter("endpoint") String endpoint;
@Inject @Parameter("queue") String queue;
@Inject @Parameter("jitterSeconds") Optional<Integer> jitterSeconds;
@Inject CommitLogFanoutAction() {}
@Override
public void run() {
Queue taskQueue = getQueue(queue);
for (int bucketId : CommitLogBucket.getBucketIds()) {
TaskOptions taskOptions = TaskOptions.Builder.withUrl(endpoint)
.param(BUCKET_PARAM, Integer.toString(bucketId))
.countdownMillis(jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0);
taskEnqueuer.enqueue(taskQueue, taskOptions);
}
}
}
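// Illustrative note on the jitter above: each bucket's task receives a uniformly
// random countdown in [0, jitterSeconds * 1000) milliseconds, so a hypothetical
// jitterSeconds of 60 spreads the fanned-out tasks across a full minute instead
// of firing them all at once.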

View file

@ -1,76 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.domain.registry.request.RequestParameters.extractBooleanParameter;
import static com.google.domain.registry.request.RequestParameters.extractOptionalIntParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static com.google.domain.registry.request.RequestParameters.extractSetOfParameters;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the cron package. */
@Module
public final class CronModule {
@Provides
@Parameter("endpoint")
static String provideEndpoint(HttpServletRequest req) {
return extractRequiredParameter(req, "endpoint");
}
@Provides
@Parameter("exclude")
static ImmutableSet<String> provideExcludes(HttpServletRequest req) {
return extractSetOfParameters(req, "exclude");
}
@Provides
@Parameter("queue")
static String provideQueue(HttpServletRequest req) {
return extractRequiredParameter(req, "queue");
}
@Provides
@Parameter("runInEmpty")
static boolean provideRunInEmpty(HttpServletRequest req) {
return extractBooleanParameter(req, "runInEmpty");
}
@Provides
@Parameter("forEachRealTld")
static boolean provideForEachRealTld(HttpServletRequest req) {
return extractBooleanParameter(req, "forEachRealTld");
}
@Provides
@Parameter("forEachTestTld")
static boolean provideForEachTestTld(HttpServletRequest req) {
return extractBooleanParameter(req, "forEachTestTld");
}
@Provides
@Parameter("jitterSeconds")
static Optional<Integer> provideJitterSeconds(HttpServletRequest req) {
return extractOptionalIntParameter(req, "jitterSeconds");
}
}

View file

@ -1,134 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;
import static com.google.common.base.Strings.nullToEmpty;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.getFirst;
import static com.google.common.collect.Multimaps.filterKeys;
import static com.google.common.collect.Sets.difference;
import static com.google.domain.registry.model.registry.Registries.getTldsOfType;
import static com.google.domain.registry.model.registry.Registry.TldType.REAL;
import static com.google.domain.registry.model.registry.Registry.TldType.TEST;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.ParameterMap;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.TaskEnqueuer;
import java.util.Random;
import java.util.Set;
import javax.inject.Inject;
/**
* Action for fanning out cron tasks shared by TLD.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code endpoint} (Required) URL path of servlet to launch. This may contain pathargs.
* <li>{@code queue} (Required) Name of the App Engine push queue to which this task should be sent.
* <li>{@code forEachRealTld} Launch the task in each real TLD namespace.
* <li>{@code forEachTestTld} Launch the task in each test TLD namespace.
* <li>{@code runInEmpty} Launch the task in the empty namespace.
* <li>{@code exclude} TLDs to exclude.
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* <li>Any other parameters specified will be passed through as POST parameters to the called task.
* </ul>
*
* <h3>Patharg Reference</h3>
*
* <p>The following values may be specified inside the "endpoint" param.
* <ul>
* <li>{@code :tld} Substituted with an ASCII tld, if tld fanout is enabled.
* This patharg is mostly useful for aesthetic purposes, since tasks are already namespaced.
* </ul>
*/
@Action(path = "/_dr/cron/fanout", automaticallyPrintOk = true)
public final class TldFanoutAction implements Runnable {
private static final String ENDPOINT_PARAM = "endpoint";
private static final String QUEUE_PARAM = "queue";
private static final String FOR_EACH_REAL_TLD_PARAM = "forEachRealTld";
private static final String FOR_EACH_TEST_TLD_PARAM = "forEachTestTld";
private static final String RUN_IN_EMPTY_PARAM = "runInEmpty";
private static final String EXCLUDE_PARAM = "exclude";
private static final String JITTER_SECONDS_PARAM = "jitterSeconds";
/** A set of control params to TldFanoutAction that aren't passed down to the executing action. */
private static final Set<String> CONTROL_PARAMS = ImmutableSet.of(
ENDPOINT_PARAM,
QUEUE_PARAM,
FOR_EACH_REAL_TLD_PARAM,
FOR_EACH_TEST_TLD_PARAM,
RUN_IN_EMPTY_PARAM,
EXCLUDE_PARAM,
JITTER_SECONDS_PARAM);
private static final String TLD_PATHARG = ":tld";
private static final Random random = new Random();
@Inject TaskEnqueuer taskEnqueuer;
@Inject @Parameter(ENDPOINT_PARAM) String endpoint;
@Inject @Parameter(QUEUE_PARAM) String queue;
@Inject @Parameter(FOR_EACH_REAL_TLD_PARAM) boolean forEachRealTld;
@Inject @Parameter(FOR_EACH_TEST_TLD_PARAM) boolean forEachTestTld;
@Inject @Parameter(RUN_IN_EMPTY_PARAM) boolean runInEmpty;
@Inject @Parameter(EXCLUDE_PARAM) ImmutableSet<String> excludes;
@Inject @Parameter(JITTER_SECONDS_PARAM) Optional<Integer> jitterSeconds;
@Inject @ParameterMap ImmutableListMultimap<String, String> params;
@Inject TldFanoutAction() {}
@Override
public void run() {
Set<String> namespaces = ImmutableSet.copyOf(concat(
runInEmpty ? ImmutableSet.of("") : ImmutableSet.<String>of(),
forEachRealTld ? getTldsOfType(REAL) : ImmutableSet.<String>of(),
forEachTestTld ? getTldsOfType(TEST) : ImmutableSet.<String>of()));
Multimap<String, String> flowThruParams = filterKeys(params, not(in(CONTROL_PARAMS)));
Queue taskQueue = getQueue(queue);
for (String namespace : difference(namespaces, excludes)) {
taskEnqueuer.enqueue(taskQueue, createTaskOptions(namespace, flowThruParams));
}
}
private TaskOptions createTaskOptions(String tld, Multimap<String, String> params) {
TaskOptions options =
withUrl(endpoint.replace(TLD_PATHARG, String.valueOf(tld)))
.countdownMillis(
jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0);
options.param(RequestParameters.PARAM_TLD, tld);
for (String param : params.keySet()) {
// TaskOptions.param() does not accept null values.
options.param(param, nullToEmpty((getFirst(params.get(param), null))));
}
return options;
}
}
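// Illustrative cron request, with hypothetical queue and endpoint values:
//
//   /_dr/cron/fanout?queue=some-queue&endpoint=/_dr/task/doStuff/:tld&forEachRealTld=true
//
// For hypothetical real TLDs "foo" and "bar", this enqueues POSTs to
// /_dr/task/doStuff/foo and /_dr/task/doStuff/bar, each carrying a tld=<name>
// parameter plus any non-control parameters passed through from the request.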

View file

@ -1,16 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.cron;

View file

@ -1,41 +0,0 @@
# Description:
# Routines to publish authoritative DNS.
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "constants",
srcs = ["DnsConstants.java"],
)
java_library(
name = "dns",
srcs = glob(
["*.java"],
exclude = ["DnsConstants.java"],
),
deps = [
":constants",
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/html",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/dns/writer/api",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -1,35 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
/** Static class for DNS-related constants. */
public class DnsConstants {
private DnsConstants() {}
/** The name of the DNS pull queue. */
public static final String DNS_PULL_QUEUE_NAME = "dns-pull"; // See queue.xml.
/** The name of the DNS publish push queue. */
public static final String DNS_PUBLISH_PUSH_QUEUE_NAME = "dns-publish"; // See queue.xml.
/** The parameter to use for storing the target type ("domain" or "host" or "zone"). */
public static final String DNS_TARGET_TYPE_PARAM = "Target-Type";
/** The parameter to use for storing the target name (domain or host name) with the task. */
public static final String DNS_TARGET_NAME_PARAM = "Target-Name";
/** The possible values of the {@code DNS_TARGET_TYPE_PARAM} parameter. */
public enum TargetType { DOMAIN, HOST, ZONE }
}

View file

@ -1,93 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.domain.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static com.google.domain.registry.dns.PublishDnsUpdatesAction.DOMAINS_PARAM;
import static com.google.domain.registry.dns.PublishDnsUpdatesAction.HOSTS_PARAM;
import static com.google.domain.registry.dns.ReadDnsQueueAction.KEEP_TASKS_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractBooleanParameter;
import static com.google.domain.registry.request.RequestParameters.extractEnumParameter;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static com.google.domain.registry.request.RequestParameters.extractSetOfParameters;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.dns.writer.api.DnsWriterZone;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import dagger.Module;
import dagger.Provides;
import java.util.Set;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the dns package. */
@Module
public final class DnsModule {
@Provides
@DnsWriterZone
static String provideZoneName(@Parameter(RequestParameters.PARAM_TLD) String tld) {
return tld;
}
@Provides
@Named(DNS_PULL_QUEUE_NAME)
static Queue provideDnsPullQueue() {
return QueueFactory.getQueue(DNS_PULL_QUEUE_NAME);
}
@Provides
@Named(DNS_PUBLISH_PUSH_QUEUE_NAME)
static Queue provideDnsUpdatePushQueue() {
return QueueFactory.getQueue(DNS_PUBLISH_PUSH_QUEUE_NAME);
}
@Provides
@Parameter(DOMAINS_PARAM)
static Set<String> provideDomains(HttpServletRequest req) {
return extractSetOfParameters(req, DOMAINS_PARAM);
}
@Provides
@Parameter(HOSTS_PARAM)
static Set<String> provideHosts(HttpServletRequest req) {
return extractSetOfParameters(req, HOSTS_PARAM);
}
@Provides
@Parameter(KEEP_TASKS_PARAM)
static boolean provideKeepTasks(HttpServletRequest req) {
return extractBooleanParameter(req, KEEP_TASKS_PARAM);
}
@Provides
@Parameter("name")
static String provideName(HttpServletRequest req) {
return extractRequiredParameter(req, "name");
}
@Provides
@Parameter("type")
static TargetType provideType(HttpServletRequest req) {
return extractEnumParameter(req, TargetType.class, "type");
}
}

View file

@ -1,161 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.domain.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.registry.Registries.assertTldExists;
import static com.google.domain.registry.request.RequestParameters.PARAM_TLD;
import static com.google.domain.registry.util.DomainNameUtils.getTldFromDomainName;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueConstants;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.apphosting.api.DeadlineExceededException;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.registry.Registries;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.Duration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
/** Methods for manipulating the queue used for DNS write tasks. */
public class DnsQueue {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject @Config("dnsWriteLockTimeout") Duration writeLockTimeout;
@Inject @Named(DNS_PULL_QUEUE_NAME) Queue queue;
@Inject DnsQueue() {}
long writeBatchSize = QueueConstants.maxLeaseCount();
/**
* Enqueues the given task type with the given target name to the DNS queue, tagged with the
* specified TLD.
*/
private TaskHandle addToQueue(TargetType targetType, String targetName, String tld) {
return queue.add(TaskOptions.Builder
// TODO(b/24564175): don't set the tag
.withTag(tld)
.method(Method.PULL)
.param(DNS_TARGET_TYPE_PARAM, targetType.toString())
.param(DNS_TARGET_NAME_PARAM, targetName)
.param(PARAM_TLD, tld));
}
/**
* Adds a task to the queue to refresh the DNS information for the specified subordinate host.
*/
public TaskHandle addHostRefreshTask(String fullyQualifiedHostName) {
Optional<InternetDomainName> tld =
Registries.findTldForName(InternetDomainName.from(fullyQualifiedHostName));
checkArgument(tld.isPresent(),
"%s is not a subordinate host to a known tld", fullyQualifiedHostName);
return addToQueue(TargetType.HOST, fullyQualifiedHostName, tld.get().toString());
}
/** Adds a task to the queue to refresh the DNS information for the specified domain. */
public TaskHandle addDomainRefreshTask(String fullyQualifiedDomainName) {
return addToQueue(
TargetType.DOMAIN,
fullyQualifiedDomainName,
assertTldExists(getTldFromDomainName(fullyQualifiedDomainName)));
}
/** Adds a task to the queue to refresh the DNS information for the specified zone. */
public TaskHandle addZoneRefreshTask(String fullyQualifiedZoneName) {
return addToQueue(TargetType.ZONE, fullyQualifiedZoneName, fullyQualifiedZoneName);
}
/**
* Returns a batch of pending tasks.
*/
public List<TaskHandle> leaseTasks() {
return leaseTasks(null);
}
/**
* Returns a batch of pending tasks.
*
* @param tag the filter used to lease only those tasks that match
*/
public List<TaskHandle> leaseTasks(@Nullable String tag) {
try {
return isNullOrEmpty(tag)
? queue.leaseTasks(writeLockTimeout.getMillis(), MILLISECONDS, writeBatchSize)
: queue.leaseTasksByTag(writeLockTimeout.getMillis(), MILLISECONDS, writeBatchSize, tag);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed leasing tasks too fast");
return ImmutableList.of();
}
}
/** Reduce the task lease time to zero, making it immediately available to be leased again. */
public void dropTaskLease(TaskHandle task) {
try {
queue.modifyTaskLease(task, 0, TimeUnit.SECONDS);
} catch (IllegalStateException e) {
logger.warningfmt(e, "Failed dropping expired lease: %s", task.getName());
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed dropping task leases too fast");
}
}
/** Delete the task, removing it from the queue permanently. */
public void deleteTask(TaskHandle task) {
try {
queue.deleteTask(task);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed deleting tasks too fast");
}
}
/** Delete a list of tasks, removing them from the queue permanently. */
public void deleteTasks(List<TaskHandle> tasks) {
try {
queue.deleteTask(tasks);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.severe(e, "Failed deleting tasks too fast");
}
}
// TODO(b/19483428): Remove me when flows package is ported to Dagger.
/** Creates a new instance. */
public static DnsQueue create() {
DnsQueue result = new DnsQueue();
result.writeLockTimeout = Duration.standardSeconds(120);
result.queue = QueueFactory.getQueue(DNS_PULL_QUEUE_NAME);
return result;
}
}
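// Illustrative lifecycle sketch using the methods above; the host name
// "ns1.label.example" and TLD "example" are hypothetical:
//
//   DnsQueue dnsQueue = DnsQueue.create();
//   dnsQueue.addHostRefreshTask("ns1.label.example");        // enqueue a refresh
//   List<TaskHandle> batch = dnsQueue.leaseTasks("example"); // lease by TLD tag
//   for (TaskHandle task : batch) {
//     dnsQueue.deleteTask(task); // on success; or dropTaskLease(task) to retry
//   }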

View file

@ -1,98 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.domain.registry.model.server.Lock.executeWithLocks;
import static com.google.domain.registry.request.Action.Method.POST;
import static com.google.domain.registry.util.CollectionUtils.nullToEmpty;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.ServiceUnavailableException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.DomainNameUtils;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.Duration;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
import javax.inject.Provider;
/** Task that sends domain and host updates to the DNS server. */
@Action(path = PublishDnsUpdatesAction.PATH, method = POST, automaticallyPrintOk = true)
public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
public static final String PATH = "/_dr/task/publishDnsUpdates";
public static final String DOMAINS_PARAM = "domains";
public static final String HOSTS_PARAM = "hosts";
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject DnsQueue dnsQueue;
@Inject Provider<DnsWriter> writerProvider;
@Inject @Config("dnsWriteLockTimeout") Duration timeout;
@Inject @Parameter(RequestParameters.PARAM_TLD) String tld;
@Inject @Parameter(DOMAINS_PARAM) Set<String> domains;
@Inject @Parameter(HOSTS_PARAM) Set<String> hosts;
@Inject PublishDnsUpdatesAction() {}
/** Runs the task. */
@Override
public void run() {
String lockName = String.format("DNS zone %s", tld);
// If executeWithLocks fails to get the lock, it does not throw an exception; it simply returns
// false. We need to take note of this failure; otherwise, the update task might be dequeued and
// dropped. A message will already have been logged to indicate the problem.
if (!executeWithLocks(this, getClass(), tld, timeout, lockName)) {
throw new ServiceUnavailableException("Lock failure");
}
}
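// Note on the handling above: failing the request with ServiceUnavailableException
// causes App Engine to redeliver the push-queue task later, so the update is
// retried rather than silently dropped.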
/** Runs the task, with the lock. */
@Override
public Void call() {
processBatch();
return null;
}
/** Steps through the domain and host refreshes contained in the parameters and processes them. */
private void processBatch() {
try (DnsWriter writer = writerProvider.get()) {
for (String domain : nullToEmpty(domains)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(domain), InternetDomainName.from(tld))) {
logger.severefmt("%s: skipping domain %s not under tld", tld, domain);
} else {
writer.publishDomain(domain);
}
}
for (String host : nullToEmpty(hosts)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(host), InternetDomainName.from(tld))) {
logger.severefmt("%s: skipping host %s not under tld", tld, host);
} else {
writer.publishHost(host);
}
}
}
}
}

View file

@ -1,204 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.collect.Sets.difference;
import static com.google.domain.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.registry.Registries.getTlds;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.base.Optional;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.TreeMultimap;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import java.io.UnsupportedEncodingException;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Named;
/**
* Action for fanning out DNS refresh tasks by TLD, using data taken from the DNS pull queue.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* <li>{@code keepTasks} Do not delete any tasks from the pull queue, whether they are processed or
* not.
* </ul>
*/
@Action(path = "/_dr/cron/readDnsQueue", automaticallyPrintOk = true)
public final class ReadDnsQueueAction implements Runnable {
public static final String KEEP_TASKS_PARAM = "keepTasks";
private static final String JITTER_SECONDS_PARAM = "jitterSeconds";
private static final Random random = new Random();
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject @Config("dnsTldUpdateBatchSize") int tldUpdateBatchSize;
@Inject @Named(DNS_PUBLISH_PUSH_QUEUE_NAME) Queue dnsPublishPushQueue;
@Inject @Parameter(JITTER_SECONDS_PARAM) Optional<Integer> jitterSeconds;
@Inject @Parameter(KEEP_TASKS_PARAM) boolean keepTasks;
@Inject DnsQueue dnsQueue;
@Inject TaskEnqueuer taskEnqueuer;
@Inject ReadDnsQueueAction() {}
/** Container for items we pull out of the DNS pull queue and process for fanout. */
private class RefreshItem implements Comparable<RefreshItem> {
final TargetType type;
final String name;
public RefreshItem(final TargetType type, final String name) {
this.type = type;
this.name = name;
}
@Override
public int compareTo(RefreshItem other) {
return ComparisonChain.start()
.compare(this.type, other.type)
.compare(this.name, other.name)
.result();
}
}
/** Leases all tasks from the pull queue and creates per-tld update actions for them. */
@Override
public void run() {
Set<String> tldsOfInterest = getTlds();
List<TaskHandle> tasks = dnsQueue.leaseTasks();
if (tasks.isEmpty()) {
return;
}
logger.infofmt("leased %d tasks", tasks.size());
// Normally, all tasks will be deleted from the pull queue. But some might have to remain if
// we are not interested in the associated TLD, or if the TLD is paused. Remember which these
// are.
Set<TaskHandle> tasksToKeep = new HashSet<>();
// The paused TLDs for which we found at least one refresh request.
Set<String> pausedTlds = new HashSet<>();
// Create a sorted multimap into which we will insert the refresh items, so that the items for
// each TLD will be grouped together, and domains and hosts will be grouped within a TLD. The
// grouping and ordering of domains and hosts is not technically necessary, but a predictable
// ordering makes it possible to write detailed tests.
TreeMultimap<String, RefreshItem> refreshItemMultimap = TreeMultimap.create();
// Read all tasks on the DNS pull queue and load them into the refresh item multimap.
for (TaskHandle task : tasks) {
try {
Map<String, String> params = ImmutableMap.copyOf(task.extractParams());
// Dual-read the TLD from either the parameter (new methodology) or the tag (old way).
// TODO(b/24564175): get the TLD from the regular parameter only.
String tld = task.getTag();
if (tld == null) {
tld = params.get(RequestParameters.PARAM_TLD);
}
if (tld == null) {
logger.severe("discarding invalid DNS refresh request; no TLD specified");
} else if (!tldsOfInterest.contains(tld)) {
tasksToKeep.add(task);
} else if (Registry.get(tld).getDnsPaused()) {
tasksToKeep.add(task);
pausedTlds.add(tld);
} else {
String typeString = params.get(DNS_TARGET_TYPE_PARAM);
String name = params.get(DNS_TARGET_NAME_PARAM);
if (typeString == null) {
logger.severe("discarding invalid DNS refresh request; no type specified");
} else if (name == null) {
logger.severe("discarding invalid DNS refresh request; no name specified");
} else {
TargetType type = TargetType.valueOf(typeString);
switch (type) {
case DOMAIN:
case HOST:
refreshItemMultimap.put(tld, new RefreshItem(type, name));
break;
default:
logger.severefmt("discarding DNS refresh request of type %s", typeString);
break;
}
}
}
} catch (UnsupportedEncodingException e) {
logger.severefmt(e, "discarding invalid DNS refresh request (task %s)", task);
}
}
if (!pausedTlds.isEmpty()) {
logger.infofmt("the dns-pull queue is paused for tlds: %s", pausedTlds);
}
// Loop through the multimap by TLD and generate refresh tasks for the hosts and domains.
for (Map.Entry<String, Collection<RefreshItem>> tldRefreshItemsEntry
: refreshItemMultimap.asMap().entrySet()) {
for (List<RefreshItem> chunk : Iterables.partition(
tldRefreshItemsEntry.getValue(), tldUpdateBatchSize)) {
TaskOptions options = withUrl(PublishDnsUpdatesAction.PATH)
.countdownMillis(jitterSeconds.isPresent()
? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
: 0)
.param(RequestParameters.PARAM_TLD, tldRefreshItemsEntry.getKey());
for (RefreshItem refreshItem : chunk) {
options.param(
(refreshItem.type == TargetType.HOST)
? PublishDnsUpdatesAction.HOSTS_PARAM : PublishDnsUpdatesAction.DOMAINS_PARAM,
refreshItem.name);
}
taskEnqueuer.enqueue(dnsPublishPushQueue, options);
}
}
Set<TaskHandle> tasksToDelete = difference(ImmutableSet.copyOf(tasks), tasksToKeep);
// In keepTasks mode, never delete any tasks.
if (keepTasks) {
logger.infofmt("would have deleted %d tasks", tasksToDelete.size());
for (TaskHandle task : tasks) {
dnsQueue.dropTaskLease(task);
}
// Otherwise, either delete or drop the lease of each task.
} else {
logger.infofmt("deleting %d tasks", tasksToDelete.size());
dnsQueue.deleteTasks(ImmutableList.copyOf(tasksToDelete));
logger.infofmt("dropping %d tasks", tasksToKeep.size());
for (TaskHandle task : tasksToKeep) {
dnsQueue.dropTaskLease(task);
}
logger.infofmt("done");
}
}
}

View file

@ -1,80 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.domain.registry.model.EppResourceUtils.loadByUniqueId;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.model.EppResource;
import com.google.domain.registry.model.domain.DomainResource;
import com.google.domain.registry.model.host.HostResource;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.NotFoundException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;
import javax.inject.Inject;
/** Action that manually triggers refresh of DNS information. */
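// A sketch of a manual invocation (the domain name is illustrative; requests to
// /_dr/* paths must be admin-authenticated, per web.xml):
//
//   GET /_dr/dnsRefresh?type=DOMAIN&name=foo.tld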
@Action(path = "/_dr/dnsRefresh", automaticallyPrintOk = true)
public final class RefreshDnsAction implements Runnable {
@Inject Clock clock;
@Inject DnsQueue dnsQueue;
@Inject @Parameter("name") String domainOrHostName;
@Inject @Parameter("type") TargetType type;
@Inject RefreshDnsAction() {}
@Override
public void run() {
if (!domainOrHostName.contains(".")) {
throw new BadRequestException("URL parameter 'name' must be fully qualified");
}
boolean domainLookup;
Class<? extends EppResource> clazz;
switch (type) {
case DOMAIN:
domainLookup = true;
clazz = DomainResource.class;
break;
case HOST:
domainLookup = false;
clazz = HostResource.class;
break;
default:
throw new BadRequestException("Unsupported type: " + type);
}
EppResource eppResource = loadByUniqueId(clazz, domainOrHostName, clock.nowUtc());
if (eppResource == null) {
throw new NotFoundException(
String.format("%s %s not found", type, domainOrHostName));
}
if (domainLookup) {
dnsQueue.addDomainRefreshTask(domainOrHostName);
} else {
if (((HostResource) eppResource).getSuperordinateDomain() == null) {
throw new BadRequestException(
String.format("%s isn't a subordinate hostname", domainOrHostName));
} else {
// Don't enqueue host refresh tasks for external hosts.
dnsQueue.addHostRefreshTask(domainOrHostName);
}
}
}
}

View file

@ -1,158 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static com.google.domain.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static com.google.domain.registry.model.server.Lock.executeWithLocks;
import static com.google.domain.registry.request.Action.Method.POST;
import com.google.appengine.api.LifecycleManager;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.common.base.Throwables;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.DnsConstants.TargetType;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.util.DomainNameUtils;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.Duration;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import javax.inject.Inject;
import javax.inject.Provider;
/** Action that consumes pull-queue for zone updates to write to the DNS server. */
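// A sketch of how this task is reached (POST per the @Action below; the tld
// value is illustrative):
//
//   POST /_dr/task/writeDns?tld=foo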
@Action(path = "/_dr/task/writeDns", method = POST, automaticallyPrintOk = true)
public final class WriteDnsAction implements Runnable, Callable<Void> {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject DnsQueue dnsQueue;
@Inject Provider<DnsWriter> writerProvider;
@Inject @Config("dnsWriteLockTimeout") Duration timeout;
@Inject @Parameter(RequestParameters.PARAM_TLD) String tld;
@Inject WriteDnsAction() {}
/** Runs the task. */
@Override
public void run() {
String lockName = String.format("DNS zone %s", tld);
executeWithLocks(this, getClass(), tld, timeout, lockName);
}
/** Runs the task, with the lock. */
@Override
public Void call() {
processBatch();
return null;
}
/** Leases a batch of tasks tagged with the zone name from the pull queue and processes them. */
private void processBatch() {
if (LifecycleManager.getInstance().isShuttingDown()) {
logger.infofmt("%s: lifecycle manager is shutting down", tld);
return;
}
if (Registry.get(tld).getDnsPaused()) {
logger.infofmt("%s: the dns-pull queue is paused", tld);
return;
}
// Make a defensive copy to allow mutations.
List<TaskHandle> tasks = new ArrayList<>(dnsQueue.leaseTasks(tld));
if (tasks.isEmpty()) {
logger.infofmt("%s: no tasks in the dns-pull queue", tld);
return;
}
try (DnsWriter writer = writerProvider.get()) {
Iterator<TaskHandle> it = tasks.iterator();
while (it.hasNext()) {
TaskHandle task = it.next();
try {
processTask(writer, task, tld);
} catch (UnsupportedOperationException e) {
// Handle fatal errors by deleting the task.
logger.severefmt(e, "%s: deleting unsupported task %s", tld, task.toString());
dnsQueue.deleteTask(task);
it.remove();
}
}
} catch (RuntimeException e) {
Throwables.propagateIfInstanceOf(e, HttpException.class);
// Handle transient errors by dropping the task leases.
logger.severefmt(e, "%s: dropping leases of failed tasks", tld);
for (TaskHandle task : tasks) {
dnsQueue.dropTaskLease(task);
}
return;
}
for (TaskHandle task : tasks) {
dnsQueue.deleteTask(task);
}
logger.infofmt("%s: batch of %s tasks processed", tld, tasks.size());
}
/** Stages a write to authoritative DNS for this task. */
private static void processTask(DnsWriter writer, TaskHandle task, String tld) {
Map<String, String> params = new HashMap<>();
try {
for (Map.Entry<String, String> entry : task.extractParams()) {
params.put(entry.getKey(), entry.getValue());
}
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
TargetType type = TargetType.valueOf(params.get(DNS_TARGET_TYPE_PARAM));
String name = checkNotNull(params.get(DNS_TARGET_NAME_PARAM));
switch (type) {
case DOMAIN:
checkRequestArgument(
DomainNameUtils.isUnder(InternetDomainName.from(name), InternetDomainName.from(tld)),
"domain name %s is not under tld %s", name, tld);
writer.publishDomain(name);
break;
case HOST:
checkRequestArgument(
DomainNameUtils.isUnder(InternetDomainName.from(name), InternetDomainName.from(tld)),
"host name %s is not under tld %s", name, tld);
writer.publishHost(name);
break;
default:
// TODO(b/11592394): Write a full zone.
throw new UnsupportedOperationException(String.format("unexpected Type: %s", type));
}
}
private static void checkRequestArgument(boolean condition, String format, Object... args) {
if (!condition) {
throw new BadRequestException(String.format(format, args));
}
}
}

View file

@ -1,15 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "api",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/base",
"//third_party/java/dagger",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

View file

@ -1,55 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;
/**
* Transaction object for sending an atomic batch of updates for a single zone to the DNS server.
*
* <p>Here's an example of how you would publish updates for a domain and host:
* <pre>
* &#064;Inject Provider&lt;DnsWriter&gt; dnsWriter;
* try (DnsWriter writer = dnsWriter.get()) {
* writer.publishDomain(domainName);
* writer.publishHost(hostName);
* }
* </pre>
*/
public interface DnsWriter extends AutoCloseable {
/**
* Loads {@code domainName} from datastore and publishes its NS/DS records to the DNS server.
* Replaces existing records for the exact name supplied with an NS record for each name server
* and a DS record for each delegation signer stored in the registry for the supplied domain name.
* If the domain is deleted or is in a "non-publish" state then any existing records are deleted.
*
* @param domainName the fully qualified domain name, with no trailing dot
*/
void publishDomain(String domainName);
/**
* Loads {@code hostName} from datastore and publishes its A/AAAA glue records to the DNS server.
* Replaces existing records for the exact name supplied with an A or AAAA record (as
* appropriate) for each address stored in the registry for the supplied host name. If the host
* is deleted then the existing records are deleted. Assumes that this method will only be
* called for in-bailiwick hosts; the registry does not have addresses for other hosts.
*
* @param hostName the fully qualified host name, with no trailing dot
*/
void publishHost(String hostName);
/** Commits the updates to the DNS server atomically. */
@Override
void close();
}

View file

@ -1,24 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
/** Dagger qualifier for the fully-qualified zone name that's being updated. */
@Qualifier
@Documented
public @interface DnsWriterZone {}

View file

@ -1,49 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;
import com.google.common.base.Joiner;
import java.util.HashSet;
import java.util.Set;
import java.util.logging.Logger;
/**
* {@link DnsWriter} that doesn't actually update records in a DNS server.
*
* <p>All this class does is write its displeasure to the logs.
*/
public final class VoidDnsWriter implements DnsWriter {
private static final Logger logger = Logger.getLogger(VoidDnsWriter.class.getName());
private final Set<String> names = new HashSet<>();
@Override
public void publishDomain(String domainName) {
names.add(domainName);
}
@Override
public void publishHost(String hostName) {
names.add(hostName);
}
@Override
public void close() {
logger.warning("Ignoring DNS zone updates! No DnsWriterFactory implementation specified!\n"
+ Joiner.on('\n').join(names));
}
}

View file

@ -1,28 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.api;
import dagger.Module;
import dagger.Provides;
/** Dagger module that disables DNS updates. */
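// A minimal sketch of wiring this module into a Dagger component (the component
// below is hypothetical, not part of this codebase):
//
//   @Component(modules = VoidDnsWriterModule.class)
//   interface VoidDnsComponent {
//     DnsWriter dnsWriter();
//   }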
@Module
public final class VoidDnsWriterModule {
@Provides
static DnsWriter provideDnsWriter() {
return new VoidDnsWriter();
}
}

View file

@ -1,26 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "dnsupdate",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/common/primitives",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/dns/writer/api",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/util",
"//third_party/java/joda_time",
"//third_party/java/dagger",
"//third_party/java/dnsjava",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
],
)

View file

@ -1,135 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.dnsupdate;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.Ints;
import com.google.domain.registry.config.ConfigModule.Config;
import org.joda.time.Duration;
import org.xbill.DNS.Message;
import org.xbill.DNS.Opcode;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.nio.ByteBuffer;
import javax.inject.Inject;
import javax.net.SocketFactory;
/**
* A transport for DNS messages. Sends/receives DNS messages over TCP using old-style
* {@link Socket}s and the message framing defined in
* <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>. We would like to use the dnsjava
* library's {@link SimpleResolver} class for this, but it requires {@link SocketChannel}, which
* is not supported on AppEngine.
*/
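// A usage sketch, assuming a dnsjava Update message and illustrative host and
// timeout values (the constructor signature is below):
//
//   DnsMessageTransport transport = new DnsMessageTransport(
//       SocketFactory.getDefault(), "10.0.0.1", Duration.standardSeconds(30));
//   Message response = transport.send(update);
//   // send() verifies that the response ID and opcode match the query.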
public class DnsMessageTransport {
/**
* Size of message length field for DNS TCP transport.
*
* @see <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>
*/
static final int MESSAGE_LENGTH_FIELD_BYTES = 2;
private static final int MESSAGE_MAXIMUM_LENGTH = (1 << (MESSAGE_LENGTH_FIELD_BYTES * 8)) - 1;
/**
* The standard DNS port number.
*
* @see <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>
*/
@VisibleForTesting static final int DNS_PORT = 53;
private final SocketFactory factory;
private final String updateHost;
private final int updateTimeout;
/**
* Class constructor.
*
* @param factory a factory for TCP sockets
* @param updateHost host name of the DNS server
* @param updateTimeout update I/O timeout
*/
@Inject
public DnsMessageTransport(
SocketFactory factory,
@Config("dnsUpdateHost") String updateHost,
@Config("dnsUpdateTimeout") Duration updateTimeout) {
this.factory = factory;
this.updateHost = updateHost;
this.updateTimeout = Ints.checkedCast(updateTimeout.getMillis());
}
/**
* Sends a DNS "query" message (most likely an UPDATE) and returns the response. The response is
* checked for matching ID and opcode.
*
* @param query a message to send
* @return the response received from the server
* @throws IOException if the socket input/output streams throw one
* @throws IllegalArgumentException if the query is too large to be sent (> 65535 bytes)
*/
public Message send(Message query) throws IOException {
try (Socket socket = factory.createSocket(InetAddress.getByName(updateHost), DNS_PORT)) {
socket.setSoTimeout(updateTimeout);
writeMessage(socket.getOutputStream(), query);
Message response = readMessage(socket.getInputStream());
checkValidResponse(query, response);
return response;
}
}
private void checkValidResponse(Message query, Message response) {
verify(
response.getHeader().getID() == query.getHeader().getID(),
"response ID %s does not match query ID %s",
response.getHeader().getID(),
query.getHeader().getID());
verify(
response.getHeader().getOpcode() == query.getHeader().getOpcode(),
"response opcode '%s' does not match query opcode '%s'",
Opcode.string(response.getHeader().getOpcode()),
Opcode.string(query.getHeader().getOpcode()));
}
private void writeMessage(OutputStream outputStream, Message message) throws IOException {
byte[] messageData = message.toWire();
checkArgument(
messageData.length <= MESSAGE_MAXIMUM_LENGTH,
"DNS request message larger than maximum of %s: %s",
MESSAGE_MAXIMUM_LENGTH,
messageData.length);
ByteBuffer buffer = ByteBuffer.allocate(messageData.length + MESSAGE_LENGTH_FIELD_BYTES);
buffer.putShort((short) messageData.length);
buffer.put(messageData);
outputStream.write(buffer.array());
}
private Message readMessage(InputStream inputStream) throws IOException {
DataInputStream stream = new DataInputStream(inputStream);
int length = stream.readUnsignedShort();
byte[] messageData = new byte[length];
stream.readFully(messageData);
return new Message(messageData);
}
}

View file

@ -1,54 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.dnsupdate;
import com.google.domain.registry.config.ConfigModule.Config;
import org.joda.time.Duration;
import dagger.Module;
import dagger.Provides;
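/** Dagger module providing configuration for DNS update requests. */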
@Module
public class DnsUpdateConfigModule {
/**
* Host that receives DNS updates from the registry.
* Usually a "hidden master" for the TLDs.
*/
@Provides
@Config("dnsUpdateHost")
public static String provideDnsUpdateHost() {
return "localhost";
}
/**
* Timeout on the socket for DNS update requests.
*/
@Provides
@Config("dnsUpdateTimeout")
public static Duration provideDnsUpdateTimeout() {
return Duration.standardSeconds(30);
}
/**
* The DNS time-to-live (TTL) for resource records created by the registry.
*/
@Provides
@Config("dnsUpdateTimeToLive")
public static Duration provideDnsUpdateTimeToLive() {
return Duration.standardHours(2);
}
}

View file

@ -1,215 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.dnsupdate;
import static com.google.common.base.Verify.verify;
import static com.google.domain.registry.model.EppResourceUtils.loadByUniqueId;
import com.google.common.net.InternetDomainName;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import com.google.domain.registry.model.domain.DomainResource;
import com.google.domain.registry.model.domain.secdns.DelegationSignerData;
import com.google.domain.registry.model.host.HostResource;
import com.google.domain.registry.model.registry.Registries;
import com.google.domain.registry.util.Clock;
import org.joda.time.Duration;
import org.xbill.DNS.AAAARecord;
import org.xbill.DNS.ARecord;
import org.xbill.DNS.DClass;
import org.xbill.DNS.DSRecord;
import org.xbill.DNS.Message;
import org.xbill.DNS.NSRecord;
import org.xbill.DNS.Name;
import org.xbill.DNS.RRset;
import org.xbill.DNS.Rcode;
import org.xbill.DNS.TextParseException;
import org.xbill.DNS.Type;
import org.xbill.DNS.Update;
import java.io.IOException;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import javax.inject.Inject;
/**
* A DnsWriter that implements the DNS UPDATE protocol as specified in
* <a href="https://tools.ietf.org/html/rfc2136">RFC 2136</a>. Publishes changes in the
* domain-registry to a (capable) external DNS server, sometimes called a "hidden master". DNS
* UPDATE messages are sent via a "resolver" class which implements the network transport. For each
* publish call, a single UPDATE message is created containing the records required to "synchronize"
* the DNS with the current (at the time of processing) state of the registry, for the supplied
* domain/host.
*
* <p>The general strategy of the publish methods is to delete <em>all</em> resource records of any
* <em>type</em> that match the exact domain/host name supplied. And then for create/update cases,
* add any required records. Deleting all records of any type assumes that the registry is
* authoritative for all records for names in the zone. This seems appropriate for a TLD DNS server,
* which should only contain records required for proper DNS delegation.
*
* <p>Only NS, DS, A, and AAAA records are published; in particular, no DNSSEC signing is done,
* on the assumption that this will be handled by a third-party DNS provider.
*
* <p>Each publish call is treated as an atomic update to the DNS. If an update fails, an
* exception is thrown, and the caller is expected to retry the update later. The SOA record
* serial number is
* implicitly incremented by the server on each UPDATE message, as required by RFC 2136. Care must
* be taken to make sure the SOA serial number does not go backwards if the entire TLD (zone) is
* "reset" to empty and republished.
*/
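// Conceptually, publishDomain("foo.tld") stages an UPDATE roughly equivalent to
// this zone-file fragment (name, TTL, and DS fields are illustrative; 7200s is
// the default TTL provided by DnsUpdateConfigModule):
//
//   foo.tld.  ANY             ; first, delete all records for the exact name
//   foo.tld.  7200  IN  NS    ns1.registrar.example.
//   foo.tld.  7200  IN  DS    12345 8 2 <digest>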
public class DnsUpdateWriter implements DnsWriter {
private final Duration dnsTimeToLive;
private final DnsMessageTransport resolver;
private final Clock clock;
/**
* Class constructor.
*
* @param dnsTimeToLive TTL used for any created resource records
* @param resolver a resolver used to send/receive the UPDATE messages
* @param clock a source of time
*/
@Inject
public DnsUpdateWriter(
@Config("dnsUpdateTimeToLive") Duration dnsTimeToLive,
DnsMessageTransport resolver,
Clock clock) {
this.dnsTimeToLive = dnsTimeToLive;
this.resolver = resolver;
this.clock = clock;
}
@Override
public void publishDomain(String domainName) {
DomainResource domain = loadByUniqueId(DomainResource.class, domainName, clock.nowUtc());
try {
Update update = new Update(toAbsoluteName(findTldFromName(domainName)));
update.delete(toAbsoluteName(domainName), Type.ANY);
if (domain != null && domain.shouldPublishToDns()) {
update.add(makeNameServerSet(domainName, domain.loadNameservers()));
update.add(makeDelegationSignerSet(domainName, domain.getDsData()));
}
Message response = resolver.send(update);
verify(
response.getRcode() == Rcode.NOERROR,
"DNS server failed domain update for '%s' rcode: %s",
domainName,
Rcode.string(response.getRcode()));
} catch (IOException e) {
throw new RuntimeException("publishDomain failed: " + domainName, e);
}
}
@Override
public void publishHost(String hostName) {
HostResource host = loadByUniqueId(HostResource.class, hostName, clock.nowUtc());
try {
Update update = new Update(toAbsoluteName(findTldFromName(hostName)));
update.delete(toAbsoluteName(hostName), Type.ANY);
if (host != null) {
update.add(makeAddressSet(hostName, host.getInetAddresses()));
update.add(makeV6AddressSet(hostName, host.getInetAddresses()));
}
Message response = resolver.send(update);
verify(
response.getRcode() == Rcode.NOERROR,
"DNS server failed host update for '%s' rcode: %s",
hostName,
Rcode.string(response.getRcode()));
} catch (IOException e) {
throw new RuntimeException("publishHost failed: " + hostName, e);
}
}
/**
* Does nothing. Publish calls are synchronous and atomic.
*/
@Override
public void close() {}
private RRset makeDelegationSignerSet(String domainName, Iterable<DelegationSignerData> dsData)
throws TextParseException {
RRset signerSet = new RRset();
for (DelegationSignerData signerData : dsData) {
DSRecord dsRecord =
new DSRecord(
toAbsoluteName(domainName),
DClass.IN,
dnsTimeToLive.getStandardSeconds(),
signerData.getKeyTag(),
signerData.getAlgorithm(),
signerData.getDigestType(),
signerData.getDigest());
signerSet.addRR(dsRecord);
}
return signerSet;
}
private RRset makeNameServerSet(String domainName, Iterable<HostResource> nameservers)
throws TextParseException {
RRset nameServerSet = new RRset();
for (HostResource host : nameservers) {
NSRecord record =
new NSRecord(
toAbsoluteName(domainName),
DClass.IN,
dnsTimeToLive.getStandardSeconds(),
toAbsoluteName(host.getFullyQualifiedHostName()));
nameServerSet.addRR(record);
}
return nameServerSet;
}
private RRset makeAddressSet(String hostName, Iterable<InetAddress> addresses)
throws TextParseException {
RRset addressSet = new RRset();
for (InetAddress address : addresses) {
if (address instanceof Inet4Address) {
ARecord record =
new ARecord(
toAbsoluteName(hostName), DClass.IN, dnsTimeToLive.getStandardSeconds(), address);
addressSet.addRR(record);
}
}
return addressSet;
}
private RRset makeV6AddressSet(String hostName, Iterable<InetAddress> addresses)
throws TextParseException {
RRset addressSet = new RRset();
for (InetAddress address : addresses) {
if (address instanceof Inet6Address) {
AAAARecord record =
new AAAARecord(
toAbsoluteName(hostName), DClass.IN, dnsTimeToLive.getStandardSeconds(), address);
addressSet.addRR(record);
}
}
return addressSet;
}
private String findTldFromName(String name) {
return Registries.findTldForNameOrThrow(InternetDomainName.from(name)).toString();
}
private Name toAbsoluteName(String name) throws TextParseException {
return Name.fromString(name, Name.root);
}
}

View file

@ -1,37 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.dns.writer.dnsupdate;
import com.google.domain.registry.dns.writer.api.DnsWriter;
import javax.net.SocketFactory;
import dagger.Module;
import dagger.Provides;
/** Dagger module that provides a DnsUpdateWriter. */
@Module
public final class DnsUpdateWriterModule {
@Provides
static DnsWriter provideDnsWriter(DnsUpdateWriter dnsWriter) {
return dnsWriter;
}
@Provides
static SocketFactory provideSocketFactory() {
return SocketFactory.getDefault();
}
}

View file

@ -1,65 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_binary(
name = "annotation_processors_ide",
create_executable = False,
runtime_deps = [
"@auto_factory//jar",
"@auto_service//jar",
"@auto_value//jar",
"@javawriter//jar",
"//third_party/java/dagger:dagger-compiler",
],
)
java_binary(
name = "registry_deps",
create_executable = 0,
runtime_deps = [
"//java/com/google/common/testing",
"//java/com/google/domain/registry/module/frontend",
"//java/com/google/domain/registry/module/backend",
"//java/com/google/domain/registry/module/tools",
"//java/com/google/domain/registry/tools",
"//third_party/java/appengine:appengine-api-link",
"//third_party/java/appengine:appengine-remote-api-link",
"//third_party/java/appengine:appengine-stubs",
"//third_party/java/appengine:appengine-integration-testing",
"//third_party/java/appengine:appengine-testing",
"//third_party/java/apache_sshd",
"//third_party/java/ftpserver",
"//third_party/java/hamcrest",
"//third_party/java/jetty/v6_1_22",
"//third_party/java/junit",
"//third_party/java/mockito",
"//third_party/java/truth",
],
)
# This rule creates a copy of the registry_deps_deploy.jar with all of the
# domain-registry code removed. This is to avoid having duplicate instances
# of domain-registry classes on the Eclipse build path.
genrule(
name = "eclipse_deps",
srcs = ["//java/com/google/domain/registry/eclipse:registry_deps_deploy.jar"],
outs = ["eclipse_deps.jar"],
tools = [
"@local_jdk//:bin/jar",
"@local_jdk//:jdk-lib",
"@local_jdk//:jre-default",
],
cmd = " && ".join([
"JAR=$$(pwd)/$(location @local_jdk//:bin/jar)",
"IN=$$(pwd)/$(SRCS)",
"OUT=$$(pwd)/$@",
"TMP=$$(mktemp -d $${TMPDIR:-/tmp}/eclipse_deps.XXXXXXXX)",
"cd $$TMP",
"$$JAR -xf $$IN",
"rm -rf com/google/domain/registry",
"$$JAR -cmf META-INF/MANIFEST.MF eclipse_deps.jar .",
"mv eclipse_deps.jar $$OUT",
"rm -rf $$TMP",
]),
)
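# A sketch of building this rule (the package path is assumed from the srcs
# label above):
#
#   bazel build //java/com/google/domain/registry/eclipse:eclipse_deps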

View file

@ -1,4 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,5 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<appengine-application xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
</appengine-application>

View file

@ -1,34 +0,0 @@
<?xml version="1.0"
encoding="UTF-8"?>
<application
xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/application_5.xsd"
version="5">
<description>Google Registry</description>
<display-name>Google Registry</display-name>
<!-- Modules -->
<!-- The default module should be listed first -->
<module>
<web>
<web-uri>default</web-uri>
<context-root>default</context-root>
</web>
</module>
<module>
<web>
<web-uri>backend</web-uri>
<context-root>backend</context-root>
</web>
</module>
<module>
<web>
<web-uri>tools</web-uri>
<context-root>tools</context-root>
</web>
</module>
</application>

View file

@ -1,7 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
exports_files(glob(["WEB-INF/*"]))

View file

@ -1,13 +0,0 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -1,355 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<!-- Servlet for injected backends actions -->
<servlet>
<display-name>BackendServlet</display-name>
<servlet-name>backend-servlet</servlet-name>
<servlet-class>com.google.domain.registry.module.backend.BackendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/metrics</url-pattern>
</servlet-mapping>
<!-- RDE -->
<!--
Responsible for scanning the database to create a full deposit for a single TLD
and streaming it to cloud storage. Requests are sent here by App Engine after
`RdeCreateCronServlet` enqueues a task specifying a URL that points to this servlet.
-->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/rdeStaging</url-pattern>
</servlet-mapping>
<!--
Once `rdeCreateFullCron` finishes writing a deposit file to cloud storage, it'll
launch this task with the cloud filename so it can be uploaded to Iron Mountain
via SFTP. The file is deleted once the upload completes. This should be run via
`rde-upload-backend`.
-->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/rdeUpload</url-pattern>
</servlet-mapping>
<!-- Sends an XML RDE report to ICANN's HTTP server after rdeUploadTask finishes. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/rdeReport</url-pattern>
</servlet-mapping>
<!--
Bulk Registration Data Access. This task creates a thin escrow deposit
and saves it to cloud storage, where a separate script owned by the SREs
uploads it to ICANN.
-->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/brdaCopy</url-pattern>
</servlet-mapping>
<!-- Trademark Clearinghouse -->
<!-- Downloads TMCH DNL data from MarksDB. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/tmchDnl</url-pattern>
</servlet-mapping>
<!-- Downloads TMCH SMDRL data from MarksDB. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/tmchSmdrl</url-pattern>
</servlet-mapping>
<!-- Downloads TMCH CRL data from MarksDB. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/tmchCrl</url-pattern>
</servlet-mapping>
<!-- Reads the LORDN queues and uploads CSV data for sunrise and claims marks to MarksDB. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/nordnUpload</url-pattern>
</servlet-mapping>
<!-- Verifies upload of LORDN data to MarksDB. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/nordnVerify</url-pattern>
</servlet-mapping>
<!-- TODO(b/24564175): Remove this entry. -->
<!-- Write DNS updates. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/writeDns</url-pattern>
</servlet-mapping>
<!-- Reads the DNS push and pull queues and kicks off the appropriate tasks to update zones. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/readDnsQueue</url-pattern>
</servlet-mapping>
<!-- Publishes DNS updates. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/publishDnsUpdates</url-pattern>
</servlet-mapping>
<!-- Manually refreshes DNS information. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/dnsRefresh</url-pattern>
</servlet-mapping>
<!-- Verifies integrity of database invariants. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/verifyEntityIntegrity</url-pattern>
</servlet-mapping>
<servlet>
<description>Exports a datastore backup snapshot to GCS.</description>
<display-name>Export snapshot to GCS</display-name>
<servlet-name>exportSnapshot</servlet-name>
<servlet-class>com.google.domain.registry.export.ExportSnapshotServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>exportSnapshot</servlet-name>
<url-pattern>/_dr/task/exportSnapshot</url-pattern>
</servlet-mapping>
<servlet>
<description>Checks the completion of a datastore backup snapshot.</description>
<display-name>Check on snapshot status</display-name>
<servlet-name>checkSnapshot</servlet-name>
<servlet-class>com.google.domain.registry.export.CheckSnapshotServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>checkSnapshot</servlet-name>
<url-pattern>/_dr/task/checkSnapshot</url-pattern>
</servlet-mapping>
<!-- Loads a datastore backup snapshot into BigQuery. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/loadSnapshot</url-pattern>
</servlet-mapping>
<!-- Updates a view to point at a certain snapshot in BigQuery. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/updateSnapshotView</url-pattern>
</servlet-mapping>
<!-- Polls state of jobs in Bigquery -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/pollBigqueryJob</url-pattern>
</servlet-mapping>
<!-- Fans out a cron task over an adjustable range of TLDs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/fanout</url-pattern>
</servlet-mapping>
<!-- Backups. -->
<!-- Fans out a cron task over all commit log buckets. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/commitLogFanout</url-pattern>
</servlet-mapping>
<!-- Deletes old commit logs from datastore. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteOldCommitLogs</url-pattern>
</servlet-mapping>
<!-- Checkpoints commit logs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/commitLogCheckpoint</url-pattern>
</servlet-mapping>
<!-- Exports commit log diffs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/exportCommitLogDiff</url-pattern>
</servlet-mapping>
<!-- Restores commit logs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/restoreCommitLogs</url-pattern>
</servlet-mapping>
<!-- Deletes commit logs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/killCommitLogs</url-pattern>
</servlet-mapping>
<!-- MapReduce servlet. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>
<servlet-class>com.google.appengine.tools.pipeline.impl.servlets.PipelineServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>pipeline</servlet-name>
<url-pattern>/_ah/pipeline/*</url-pattern>
</servlet-mapping>
<!-- Syncs registrars to the registrar spreadsheet. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/syncRegistrarsSheet</url-pattern>
</servlet-mapping>
<!-- Exports TLD reserved terms. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/exportReservedTerms</url-pattern>
</servlet-mapping>
<!-- Syncs RegistrarContact changes to Google Groups. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/syncGroupMembers</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/exportDomainLists</url-pattern>
</servlet-mapping>
<!-- Deletes the specified contact resource if it is not referenced by any domains. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteContactResource</url-pattern>
</servlet-mapping>
<!-- Deletes the specified host resource if it is not referenced by any domains. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteHostResource</url-pattern>
</servlet-mapping>
<!-- Enqueues DNS update tasks following a host rename. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/dnsRefreshForHostRename</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Any request path starting with `/_dr/` will be restricted to requests originating
from the backend or by anyone authenticated to a Google account that's listed in
the AppEngine control panel settings for this project as a Viewer/Owner/Developer.
The `_dr` is short for Domain Registry to follow AppEngine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Shut down external access to registrar console. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Go home rogue registrar!
</description>
<url-pattern>/registrar*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- These are only included in the default module war, but restricting here too for safety. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -1,7 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
exports_files(glob(["WEB-INF/*"]))

View file

@ -1,83 +0,0 @@
<datastore-indexes autoGenerate="false">
<!-- For finding contact resources by registrar. -->
<datastore-index kind="ContactResource" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by registrar. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by tld. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="tld" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding domain resources by registrar. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding host resources by registrar. -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="currentSponsorClientId" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For finding account balance of Registrar and viewing billing history. -->
<datastore-index kind="RegistrarBillingEntry" ancestor="true" source="manual">
<property name="currency" direction="asc"/>
<property name="created" direction="desc"/>
</datastore-index>
<!-- For determining the active domains linked to a given contact. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="allContacts.contactId.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For determining the active domains linked to a given host. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="nameservers.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For updating domains and applications after a host rename. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="nameservers.linked" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For Whois ip lookup -->
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="inetAddresses" direction="asc"/>
<property name="deletionTime" direction="asc"/>
</datastore-index>
<!-- For Poll -->
<datastore-index kind="PollMessage" ancestor="false" source="manual">
<property name="clientId" direction="asc"/>
<property name="eventTime" direction="asc"/>
</datastore-index>
<datastore-index kind="PollMessage" ancestor="true" source="manual">
<property name="clientId" direction="asc"/>
<property name="eventTime" direction="asc"/>
</datastore-index>
<!-- For the history viewer. -->
<datastore-index kind="HistoryEntry" ancestor="true" source="manual">
<property name="modificationTime" direction="asc"/>
</datastore-index>
<!-- For RDAP. -->
<!-- TODO(b/25644770): Backfill these indexes. -->
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="fullyQualifiedDomainName" direction="asc"/>
</datastore-index>
<datastore-index kind="DomainBase" ancestor="false" source="manual">
<property name="^i" direction="asc"/>
<property name="tld" direction="asc"/>
<property name="fullyQualifiedDomainName" direction="asc"/>
</datastore-index>
<datastore-index kind="HostResource" ancestor="false" source="manual">
<property name="deletionTime" direction="asc"/>
<property name="fullyQualifiedHostName" direction="asc"/>
</datastore-index>
</datastore-indexes>

View file

@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<blacklistentries>
<!-- Example IPv4 CIDR Subnet
<blacklist>
<subnet>1.2.3.4/24</subnet>
<description>An IPv4 subnet</description>
</blacklist> -->
<!-- Example IPv6 CIDR Subnet
<blacklist>
<subnet>abcd::123:4567/48</subnet>
<description>An IPv6 subnet</description>
</blacklist> -->
</blacklistentries>

View file

@ -1,13 +0,0 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -1,318 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<queue-entries>
<queue>
<name>default</name>
<rate>1/s</rate>
<bucket-size>5</bucket-size>
</queue>
<queue>
<name>dns-cron</name>
<!-- There is no point allowing more than 10/s because the pull queue that feeds
this job will refuse to service more than 10 qps. See
https://cloud.google.com/appengine/docs/java/javadoc/com/google/appengine/api/taskqueue/Queue#leaseTasks-long-java.util.concurrent.TimeUnit-long- -->
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>dns-pull</name>
<mode>pull</mode>
</queue>
<queue>
<name>dns-publish</name>
<rate>100/s</rate>
<bucket-size>100</bucket-size>
</queue>
<queue>
<name>export</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for launching asynchronous actions (e.g. mapreduces) from async flows. -->
<queue>
<name>flows-async</name>
<!-- Note: rate-limiting a bit here because each of these will kick off an MR.
TODO(b/26140521): do more intelligent/aggressive batching than this. -->
<rate>1/m</rate>
<!-- Async flow tasks should run on the backend module. -->
<target>backend</target>
</queue>
<queue>
<name>delete-commits</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>export-commits</name>
<rate>10/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<!-- Retry aggressively since a single delayed export increases our time window of
unrecoverable data loss in the event of a datastore failure. -->
<min-backoff-seconds>1</min-backoff-seconds>
<max-backoff-seconds>60</max-backoff-seconds>
<!-- No age limit; a failed export should be retried as long as possible to avoid
having data missing from our exported commit log record. -->
</retry-parameters>
</queue>
<!-- Queue for jobs to export reserved terms to Google Drive for a TLD. -->
<queue>
<name>export-reserved-terms</name>
<rate>1/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>3</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for jobs to sync RegistrarContact changes to Google Groups. -->
<queue>
<name>group-members-sync</name>
<rate>1/s</rate>
<bucket-size>100</bucket-size>
<retry-parameters>
<task-retry-limit>3</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for polling export BigQuery jobs for completion. -->
<queue>
<name>export-bigquery-poll</name>
<!-- Limit queue to 5 concurrent tasks and 5 per second to avoid hitting BigQuery quotas. -->
<rate>5/s</rate>
<bucket-size>5</bucket-size>
<max-concurrent-requests>5</max-concurrent-requests>
<!-- Check every 20s and increase interval to every 5 minutes. -->
<retry-parameters>
<min-backoff-seconds>20</min-backoff-seconds>
<max-backoff-seconds>300</max-backoff-seconds>
<max-doublings>2</max-doublings>
</retry-parameters>
</queue>
<!-- Queue for launching new snapshots and for triggering the initial BigQuery load jobs. -->
<queue>
<name>export-snapshot</name>
<rate>5/m</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 5m interval and increasing up to a 30m interval. -->
<min-backoff-seconds>300</min-backoff-seconds>
<max-backoff-seconds>1800</max-backoff-seconds>
<task-retry-limit>10</task-retry-limit>
</retry-parameters>
</queue>
<!-- Queue for polling managed backup snapshots for completion. -->
<queue>
<name>export-snapshot-poll</name>
<rate>5/m</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 1m interval and increasing up to a 5m interval. -->
<min-backoff-seconds>60</min-backoff-seconds>
<max-backoff-seconds>300</max-backoff-seconds>
</retry-parameters>
</queue>
<!-- Queue for updating BigQuery views after a snapshot kind's load job completes. -->
<queue>
<name>export-snapshot-update-view</name>
<rate>1/s</rate>
<retry-parameters>
<!-- Should be less than the exportSnapshot cron interval; see cron.xml. -->
<task-age-limit>22h</task-age-limit>
<!-- Retry starting at a 10s interval and increasing up to a 1m interval. -->
<min-backoff-seconds>10</min-backoff-seconds>
<max-backoff-seconds>60</max-backoff-seconds>
<task-retry-limit>10</task-retry-limit>
</retry-parameters>
</queue>
<queue>
<name>mail</name>
<rate>5/m</rate>
<bucket-size>10</bucket-size>
</queue>
<queue>
<name>rde-upload</name>
<rate>1/m</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>rde-report</name>
<rate>1/s</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>rde-staging</name>
<rate>1/m</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
<task-age-limit>4h</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>brda</name>
<rate>1/m</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
<task-age-limit>23h</task-age-limit>
</retry-parameters>
</queue>
<!-- Queue for tasks that communicate with TMCH MarksDB webserver. -->
<!-- TODO(b/17623181): Delete this once the queue implementation is live and working. -->
<queue>
<name>marksdb</name>
<rate>1/m</rate>
<max-concurrent-requests>1</max-concurrent-requests>
<retry-parameters>
      <task-age-limit>11h</task-age-limit> <!-- cron interval minus one hour -->
</retry-parameters>
</queue>
  <!-- Queue for tasks to produce LORDN CSV reports, either by the query or the queue method. -->
<queue>
<name>nordn</name>
<rate>1/s</rate>
<max-concurrent-requests>10</max-concurrent-requests>
<retry-parameters>
      <task-age-limit>11h</task-age-limit> <!-- cron interval minus one hour -->
</retry-parameters>
</queue>
<!-- Queue for LORDN Claims CSV rows to be periodically queried and then uploaded in batches. -->
<queue>
<name>lordn-claims</name>
<mode>pull</mode>
</queue>
<!-- Queue for LORDN Sunrise CSV rows to be periodically queried and then uploaded in batches. -->
<queue>
<name>lordn-sunrise</name>
<mode>pull</mode>
</queue>
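  <!-- (Aside: pull queues dispatch nothing on their own; a consumer leases tasks in batches.
       A hedged Java sketch of such a consumer using the App Engine task queue API follows;
       the batch size, lease period, and class name are illustrative, not values from this
       change.)

       import com.google.appengine.api.taskqueue.LeaseOptions;
       import com.google.appengine.api.taskqueue.Queue;
       import com.google.appengine.api.taskqueue.QueueFactory;
       import com.google.appengine.api.taskqueue.TaskHandle;
       import java.util.List;
       import java.util.concurrent.TimeUnit;

       // Illustrative pull-queue consumer; batch size and lease period are made-up values.
       final class LordnLeaseSketch {
         static void uploadOneBatch() {
           Queue queue = QueueFactory.getQueue("lordn-claims");
           List<TaskHandle> rows = queue.leaseTasks(
               LeaseOptions.Builder.withLeasePeriod(5, TimeUnit.MINUTES).countLimit(100));
           // ...assemble the leased CSV rows into a single LORDN upload here...
           queue.deleteTask(rows);  // ack the rows only after the upload succeeds
         }
       }
  -->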
<!-- Queue used by the MapReduce library for running tasks.
Do not re-use this queue for tasks that our code creates (e.g. tasks to launch MapReduces
that aren't themselves part of a running MapReduce).-->
<queue>
<name>mapreduce</name>
<!-- Warning: DO NOT SET A <target> parameter for this queue. See b/24782801 for why. -->
<rate>500/s</rate>
<bucket-size>100</bucket-size>
</queue>
<!-- Queue for tasks that sync data to Google Spreadsheets. -->
<queue>
<name>sheet</name>
<rate>1/s</rate>
<!-- max-concurrent-requests is intentionally omitted. -->
<retry-parameters>
<task-age-limit>1h</task-age-limit>
</retry-parameters>
</queue>
  <!-- Queue for whitebox metrics. -->
<queue>
<name>bigquery-streaming-metrics</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
<retry-parameters>
<task-retry-limit>1</task-retry-limit>
<task-age-limit>1m</task-age-limit>
</retry-parameters>
</queue>
<queue>
<name>load0</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load1</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load2</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load3</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load4</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load5</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load6</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load7</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load8</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
<queue>
<name>load9</name>
<rate>500/s</rate>
<bucket-size>500</bucket-size>
</queue>
</queue-entries>

View file

@ -1,264 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<servlet>
<description>
This is the primary EPP endpoint for the Registry. It accepts
EPP XHRs from our TLS proxy.
</description>
<display-name>EPP</display-name>
<servlet-name>epp</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppTlsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>epp</servlet-name>
<url-pattern>/_dr/epp</url-pattern>
</servlet-mapping>
<servlet>
<description>
Registrar Console XHR servlet. Accepts EPP XHRs from GAE GAIA-authenticated frontend sessions.
</description>
<display-name>Registrar Console XHR</display-name>
<servlet-name>registrar-xhr</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppConsoleServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-xhr</servlet-name>
<url-pattern>/registrar-xhr</url-pattern>
</servlet-mapping>
<servlet>
<display-name>Registrar Self-serve Settings</display-name>
<servlet-name>registrar-settings</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.registrar.RegistrarServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>registrar-settings</servlet-name>
<url-pattern>/registrar-settings</url-pattern>
</servlet-mapping>
<!-- Servlet for injected frontend actions -->
<servlet>
<display-name>FrontendServlet</display-name>
<servlet-name>frontend-servlet</servlet-name>
<servlet-class>com.google.domain.registry.module.frontend.FrontendServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<!-- Registrar Console. -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/registrar</url-pattern>
</servlet-mapping>
<!-- Registrar Braintree payment form setup. -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/registrar-payment-setup</url-pattern>
</servlet-mapping>
<!-- Registrar Braintree payment. -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/registrar-payment</url-pattern>
</servlet-mapping>
<!-- HTTP WHOIS. -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/whois/*</url-pattern>
</servlet-mapping>
<!-- Protocol WHOIS. -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/_dr/whois</url-pattern>
</servlet-mapping>
<!-- RDAP (new WHOIS). -->
<servlet-mapping>
<servlet-name>frontend-servlet</servlet-name>
<url-pattern>/rdap/*</url-pattern>
</servlet-mapping>
<!-- Public API to do availability checks -->
<servlet>
<description>
Availability Check API.
</description>
<display-name>Availability Check</display-name>
<servlet-name>check</servlet-name>
<servlet-class>com.google.domain.registry.ui.server.api.CheckApiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>check</servlet-name>
<url-pattern>/check</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
        Any request path starting with `/_dr/` is restricted to requests originating
        from the backend or made by anyone authenticated to a Google account that's listed in
        the AppEngine control panel settings for this project as a Viewer/Owner/Developer.
The `_dr` is short for Domain Registry to follow AppEngine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-xhr</web-resource-name>
<description>
Only allow logged-in users to even try to issue EPP commands. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-xhr</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-settings</web-resource-name>
<description>
Only allow logged-in users to even try to change registrar settings. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-settings</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-payment</web-resource-name>
<description>
Only allow logged-in users to even try to change registrar settings. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-payment</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>profile-registrar-payment-token</web-resource-name>
<description>
Only allow logged-in users to even try to change registrar settings. This is an additional
layer of safety on top of in-servlet authentication and XSRF protection.
</description>
<url-pattern>/registrar-payment-token</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>*</role-name>
</auth-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- This directory contains all the JavaScript sources verbatim. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>internal-sources</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<!-- TODO(b/26776367): Move these files to /assets/sources. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-bin-js-map</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/registrar_bin.js.map</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-dbg-js</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/registrar_dbg.js</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>brain-bin-js-map</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/js/brain_bin.js.map</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-constraint>
<web-resource-collection>
<web-resource-name>registrar-dbg-css</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/css/registrar_dbg.css</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -1,7 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
exports_files(glob(["WEB-INF/*"]))

View file

@ -1,13 +0,0 @@
# A default java.util.logging configuration.
# (All App Engine logging is through java.util.logging by default).
#
# To use this configuration, copy it into your application's WEB-INF
# folder and add the following to your appengine-web.xml:
#
# <system-properties>
# <property name="java.util.logging.config.file" value="WEB-INF/logging.properties"/>
# </system-properties>
#
# Set the default logging level for all loggers to INFO.
.level = INFO

View file

@ -1,239 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee" version="2.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd">
<!-- Servlets -->
<!-- Servlet for injected tools actions -->
<servlet>
<display-name>ToolsServlet</display-name>
<servlet-name>tools-servlet</servlet-name>
<servlet-class>com.google.domain.registry.module.tools.ToolsServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/verifyOte</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/createGroups</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/createPremiumList</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/list/*</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/deleteEntity</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/admin/updatePremiumList</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/loadtest</url-pattern>
</servlet-mapping>
<!-- Command line tool uses this endpoint to modify the datastore. -->
<servlet>
<display-name>Remote API Servlet</display-name>
<servlet-name>RemoteApiServlet</servlet-name>
<servlet-class>com.google.apphosting.utils.remoteapi.RemoteApiServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>RemoteApiServlet</servlet-name>
<url-pattern>/remote_api</url-pattern>
</servlet-mapping>
<!-- ExecuteEppCommand uses this to execute remotely. -->
<servlet>
<description>
      Executes EPP commands from the registry tool.
</description>
<display-name>Registry tool EPP endpoint</display-name>
<servlet-name>epptool</servlet-name>
<servlet-class>com.google.domain.registry.flows.EppToolServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>epptool</servlet-name>
<url-pattern>/_dr/epptool</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all prober data. -->
<servlet-mapping>
<!-- TODO(b/27309488): maybe move this to the backend module + BackendServlet. -->
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/deleteProberData</url-pattern>
</servlet-mapping>
<!-- Mapreduce to re-save all EppResources. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/resaveAllEppResources</url-pattern>
</servlet-mapping>
<!-- Mapreduce to count recurring billing events (to test the child entity reader). -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/countRecurringBillingEvents</url-pattern>
</servlet-mapping>
<!-- Mapreduce to backfill new autorenew flag on recurring billing events. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/backfillAutorenewBillingFlag</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete EppResources, children, and indices. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/killAllEppResources</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all commit logs. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/killAllCommitLogs</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all cross-tld entities. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/killAllCrossTld</url-pattern>
</servlet-mapping>
<!-- This path serves up the App Engine results page for mapreduce runs. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>
<servlet-class>com.google.appengine.tools.pipeline.impl.servlets.PipelineServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>pipeline</servlet-name>
<url-pattern>/_ah/pipeline/*</url-pattern>
</servlet-mapping>
<!-- Registrar detail report publishing action. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/publishDetailReport</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/generateZoneFiles</url-pattern>
</servlet-mapping>
<!-- Security config -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
        Any request path starting with `/_dr/` is restricted to requests originating
        from the backend or made by anyone authenticated to a Google account that's listed in
        the AppEngine control panel settings for this project as a Viewer/Owner/Developer.
The `_dr` is short for Domain Registry to follow AppEngine naming conventions.
</description>
<url-pattern>/_dr/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Require TLS on all requests. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Secure</web-resource-name>
<description>
Require encryption for all paths. http URLs will be redirected to https.
</description>
<url-pattern>/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Shut down external access to registrar console. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>
Go home rogue registrar!
</description>
<url-pattern>/registrar*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- Restrict access to source code. -->
<!-- These are only included in the default module war, but restricting here too for safety. -->
<security-constraint>
<web-resource-collection>
<web-resource-name>Internal</web-resource-name>
<description>No soup for you!</description>
<url-pattern>/assets/sources/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
<!-- See: https://code.google.com/p/objectify-appengine/wiki/Setup -->
<filter>
<filter-name>ObjectifyFilter</filter-name>
<filter-class>com.googlecode.objectify.ObjectifyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ObjectifyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- Register types with Objectify. -->
<filter>
<filter-name>OfyFilter</filter-name>
<filter-class>com.google.domain.registry.model.ofy.OfyFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>OfyFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View file

@ -1,4 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,5 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>backend</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -1,5 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,34 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>default</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
<include path="/assets/sources/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -1,203 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<cronentries>
<!--
/cron/fanout params:
queue=<QUEUE_NAME>
endpoint=<ENDPOINT_NAME> // URL Path of servlet, which may contain placeholders:
// :tld - Replaced with the TLD, e.g. foo, soy
// :registrar - Replaced with registrar clientId
runInEmpty // Run in the empty namespace
forEachRealTld // Run for tlds with getTldType() == TldType.REAL
forEachTestTld // Run for tlds with getTldType() == TldType.TEST
exclude=TLD1[&exclude=TLD2] // exclude something otherwise included
-->
<cron>
<url>/_dr/task/rdeStaging</url>
<description>
This job generates a full RDE escrow deposit as a single gigantic XML document
and streams it to cloud storage. When this job has finished successfully, it'll
launch a separate task that uploads the deposit file to Iron Mountain via SFTP.
</description>
<!--
This only needs to run once per day, but we launch additional jobs in case the
cursor is lagging behind, so it'll catch up to the current date as quickly as
possible. The only job that'll run under normal circumstances is the one that's
close to midnight, since if the cursor is up-to-date, the task is a no-op.
      We want it to be close to midnight because that reduces the chance that the
      point-in-time code will have to go to the extra trouble of fetching old
      versions of objects from the datastore. However, we don't want it to run too
close to midnight, because there's always a chance that a change which was
timestamped before midnight hasn't fully been committed to the datastore. So
we add a 4+ minute grace period to ensure the transactions cool down, since
our queries are not transactional.
-->
<schedule>every 4 hours from 00:07 to 20:00</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=rde-upload&endpoint=/_dr/task/rdeUpload&forEachRealTld]]></url>
<description>
This job is a no-op unless RdeUploadCursor falls behind for some reason.
</description>
<schedule>every 4 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=rde-report&endpoint=/_dr/task/rdeReport&forEachRealTld]]></url>
<description>
This job is a no-op unless RdeReportCursor falls behind for some reason.
</description>
<schedule>every 4 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchDnl&runInEmpty]]></url>
<description>
This job downloads the latest DNL from MarksDB and inserts it into the database.
(See: TmchDnlServlet, ClaimsList)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchSmdrl&runInEmpty]]></url>
<description>
This job downloads the latest SMDRL from MarksDB and inserts it into the database.
(See: TmchSmdrlServlet, SignedMarkRevocationList)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=marksdb&endpoint=/_dr/task/tmchCrl&runInEmpty]]></url>
<description>
This job downloads the latest CRL from MarksDB and inserts it into the database.
(See: TmchCrlServlet)
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=group-members-sync&endpoint=/_dr/task/syncGroupMembers&runInEmpty]]></url>
<description>
Syncs RegistrarContact changes in the past hour to Google Groups.
</description>
<schedule>every 1 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=sheet&endpoint=/_dr/task/syncRegistrarsSheet&runInEmpty]]></url>
<description>
Synchronize Registrar entities to Google Spreadsheets.
</description>
<schedule>every 1 hours synchronized</schedule>
<target>backend</target>
</cron>
<!-- TODO(b/23319222): Re-enable when fixed.
<cron>
<url><![CDATA[/_dr/cron/commitLogFanout?queue=delete-commits&endpoint=/_dr/task/deleteOldCommitLogs&jitterSeconds=600]]></url>
<description>
      This job deletes commit logs from datastore that are older than a threshold, e.g. thirty days.
</description>
<schedule>every 20 minutes synchronized</schedule>
<target>backend</target>
</cron>
-->
<cron>
<url><![CDATA[/_dr/cron/commitLogCheckpoint]]></url>
<description>
This job checkpoints the commit log buckets and exports the diff since last checkpoint to GCS.
</description>
<schedule>every 1 minutes synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/task/exportDomainLists]]></url>
<description>
This job exports lists of all active domain names to Google Cloud Storage.
</description>
<schedule>every 12 hours synchronized</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
<description>
This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
It also enqueues a new task to wait on the completion of that job and then load the resulting
snapshot into bigquery.
</description>
<!-- Keep the task-age-limit for this job's task queue less than this cron interval. -->
<schedule>every day 06:00</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=nordn&endpoint=/_dr/task/nordnUpload&forEachRealTld&lordn-phase=sunrise]]></url>
<description>
      This job uploads LORDN Sunrise CSV files for each TLD to MarksDB. It should be
      run no more often than every three hours, and at an absolute minimum once every 26 hours.
</description>
<!-- This may be set anywhere between "every 3 hours" and "every 25 hours". -->
<schedule>every 12 hours synchronized</schedule>
<timezone>UTC</timezone>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=nordn&endpoint=/_dr/task/nordnUpload&forEachRealTld&lordn-phase=claims]]></url>
<description>
      This job uploads LORDN Claims CSV files for each TLD to MarksDB. It should be
      run no more often than every three hours, and at an absolute minimum once every 26 hours.
</description>
<!-- This may be set anywhere between "every 3 hours" and "every 25 hours". -->
<schedule>every 12 hours synchronized</schedule>
<timezone>UTC</timezone>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/task/deleteProberData]]></url>
<description>
This job clears out data from probers and runs once a week.
</description>
<schedule>every monday 14:00</schedule>
<timezone>UTC</timezone>
<!-- TODO(b/27309488): maybe move this to the backend module. -->
<target>tools</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/fanout?queue=export-reserved-terms&endpoint=/_dr/task/exportReservedTerms&forEachRealTld]]></url>
<description>
      This job exports the reserved terms list for each real TLD to Google Drive once per day.
</description>
<schedule>every day 05:30</schedule>
<target>backend</target>
</cron>
<cron>
<url><![CDATA[/_dr/cron/readDnsQueue?jitterSeconds=45]]></url>
<description>
Lease all tasks from the dns-pull queue, group by TLD, and invoke PublishDnsUpdates for each
group.
</description>
<schedule>every 1 minutes synchronized</schedule>
<target>backend</target>
</cron>
</cronentries>

View file

@ -1,5 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)

View file

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<appengine-web-app xmlns="http://appengine.google.com/ns/1.0">
<application>domain-registry</application>
<version>1</version>
<module>tools</module>
<threadsafe>true</threadsafe>
<sessions-enabled>true</sessions-enabled>
<instance-class>B4_1G</instance-class>
<basic-scaling>
<max-instances>50</max-instances>
<idle-timeout>10m</idle-timeout>
</basic-scaling>
<system-properties>
<property name="java.util.logging.config.file"
value="WEB-INF/logging.properties"/>
<property name="com.google.domain.registry.environment"
value="production"/>
</system-properties>
<static-files>
<include path="/*.html" expiration="1d"/>
<include path="/assets/js/**" expiration="1d"/>
<include path="/assets/css/**" expiration="1d"/>
<include path="/assets/images/**" expiration="1d"/>
</static-files>
<!-- Prevent uncaught servlet errors from leaking a stack trace. -->
<static-error-handlers>
<handler file="error.html"/>
</static-error-handlers>
</appengine-web-app>

View file

@ -1,45 +0,0 @@
package(
default_visibility = ["//java/com/google/domain/registry:registry_project"],
)
java_library(
name = "export",
srcs = glob(["*.java"]),
deps = [
"//apiserving/discoverydata/bigquery:bigqueryv2",
"//apiserving/discoverydata/drive",
"//java/com/google/api/client/extensions/appengine/http",
"//java/com/google/api/client/googleapis/extensions/appengine/auth/oauth2",
"//java/com/google/api/client/googleapis/json",
"//java/com/google/api/client/http",
"//java/com/google/api/client/json",
"//java/com/google/api/client/json/jackson2",
"//java/com/google/common/annotations",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/html",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/domain/registry/bigquery",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/gcs",
"//java/com/google/domain/registry/groups",
"//java/com/google/domain/registry/mapreduce",
"//java/com/google/domain/registry/mapreduce/inputs",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/storage/drive",
"//java/com/google/domain/registry/util",
"//third_party/java/appengine:appengine-api",
"//third_party/java/appengine_gcs_client",
"//third_party/java/appengine_mapreduce2:appengine_mapreduce",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/json_simple",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/objectify:objectify-v4_1",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -1,173 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.domain.registry.bigquery.BigqueryUtils.toJobReferenceString;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobReference;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Header;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.NotModifiedException;
import com.google.domain.registry.request.Payload;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.TaskEnqueuer;
import dagger.Lazy;
import org.joda.time.Duration;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import javax.inject.Inject;
/**
 * An action that polls the state of a BigQuery job. If the job has completed, the action logs
 * its completion state; otherwise it returns a failure code so that the task will be retried.
*/
@Action(
path = BigqueryPollJobAction.PATH,
method = {Action.Method.GET, Action.Method.POST},
automaticallyPrintOk = true)
public class BigqueryPollJobAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final String QUEUE = "export-bigquery-poll"; // See queue.xml
static final String PATH = "/_dr/task/pollBigqueryJob"; // See web.xml
static final String CHAINED_TASK_QUEUE_HEADER = "X-DomainRegistry-ChainedTaskQueue";
static final String PROJECT_ID_HEADER = "X-DomainRegistry-ProjectId";
static final String JOB_ID_HEADER = "X-DomainRegistry-JobId";
static final Duration POLL_COUNTDOWN = Duration.standardSeconds(20);
@Inject Bigquery bigquery;
@Inject TaskEnqueuer enqueuer;
@Inject @Header(CHAINED_TASK_QUEUE_HEADER) Lazy<String> chainedQueueName;
@Inject @Header(PROJECT_ID_HEADER) String projectId;
@Inject @Header(JOB_ID_HEADER) String jobId;
@Inject @Payload byte[] payload;
@Inject BigqueryPollJobAction() {}
@Override
public void run() {
checkJobOutcome(); // Throws a NotModifiedException if the job hasn't completed.
if (payload == null || payload.length == 0) {
return;
}
// If there is a payload, it's a chained task, so enqueue it.
TaskOptions task;
try {
task = (TaskOptions) new ObjectInputStream(new ByteArrayInputStream(payload)).readObject();
} catch (ClassNotFoundException | IOException e) {
logger.severe(e, e.toString());
throw new BadRequestException("Cannot deserialize task from payload", e);
}
String taskName = enqueuer.enqueue(getQueue(chainedQueueName.get()), task).getName();
logger.infofmt(
"Added chained task %s for %s to queue %s: %s",
taskName,
task.getUrl(),
chainedQueueName.get(),
task.toString());
}
/**
* Returns true if the provided job succeeded, false if it failed, and throws an exception if it
* is still pending.
*/
private boolean checkJobOutcome() {
Job job = null;
String jobRefString =
toJobReferenceString(new JobReference().setProjectId(projectId).setJobId(jobId));
try {
job = bigquery.jobs().get(projectId, jobId).execute();
} catch (IOException e) {
      // The lookup failed, so we'll treat the job as not done and throw below; log the cause first.
logger.warning(e, e.getMessage());
}
// If job is not yet done, then throw an exception so that we'll return a failing HTTP status
// code and the task will be retried.
if (job == null || !job.getStatus().getState().equals("DONE")) {
throw new NotModifiedException(jobRefString);
}
// Check if the job ended with an error.
if (job.getStatus().getErrorResult() != null) {
logger.severefmt("Bigquery job failed - %s - %s", jobRefString, job);
return false;
}
logger.infofmt("Bigquery job succeeded - %s", jobRefString);
return true;
}
/** Helper class to enqueue a bigquery poll job. */
public static class BigqueryPollJobEnqueuer {
private final TaskEnqueuer enqueuer;
@Inject
BigqueryPollJobEnqueuer(TaskEnqueuer enqueuer) {
this.enqueuer = enqueuer;
}
/** Enqueue a task to poll for the success or failure of the referenced BigQuery job. */
public TaskHandle enqueuePollTask(JobReference jobRef) {
return enqueuer.enqueue(getQueue(QUEUE), createCommonPollTask(jobRef).method(Method.GET));
}
/**
* Enqueue a task to poll for the success or failure of the referenced BigQuery job and to
* launch the provided task in the specified queue if the job succeeds.
*/
public TaskHandle enqueuePollTask(
JobReference jobRef, TaskOptions chainedTask, Queue chainedTaskQueue) throws IOException {
// Serialize the chainedTask into a byte array to put in the task payload.
ByteArrayOutputStream taskBytes = new ByteArrayOutputStream();
new ObjectOutputStream(taskBytes).writeObject(chainedTask);
return enqueuer.enqueue(
getQueue(QUEUE),
createCommonPollTask(jobRef)
.method(Method.POST)
.header(CHAINED_TASK_QUEUE_HEADER, chainedTaskQueue.getQueueName())
.payload(taskBytes.toByteArray()));
}
    /** Creates the common poll task options shared by both {@code enqueuePollTask} variants. */
private static TaskOptions createCommonPollTask(JobReference jobRef) {
// Omit host header so that task will be run on the current backend/module.
return withUrl(PATH)
.countdownMillis(POLL_COUNTDOWN.getMillis())
.header(PROJECT_ID_HEADER, jobRef.getProjectId())
.header(JOB_ID_HEADER, jobRef.getJobId());
}
}
}
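
// (Aside: a hypothetical caller of BigqueryPollJobEnqueuer might chain a view-update task
// onto a load job as sketched below. The URL and queue name echo the config earlier in this
// change, but the exact path is an assumption, and pollJobEnqueuer and jobRef are assumed to
// come from the caller.)
//
//   TaskOptions updateViewTask =
//       TaskOptions.Builder.withUrl("/_dr/task/updateSnapshotView").method(Method.POST);
//   pollJobEnqueuer.enqueuePollTask(
//       jobRef, updateViewTask, QueueFactory.getQueue("export-snapshot-update-view"));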

View file

@ -1,179 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.collect.Sets.intersection;
import static com.google.common.html.HtmlEscapers.htmlEscaper;
import static com.google.domain.registry.export.LoadSnapshotAction.enqueueLoadSnapshotTask;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import static javax.servlet.http.HttpServletResponse.SC_ACCEPTED;
import static javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_NOT_MODIFIED;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.net.MediaType;
import com.google.domain.registry.export.DatastoreBackupInfo.BackupStatus;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import org.joda.time.Duration;
import org.joda.time.PeriodType;
import org.joda.time.format.PeriodFormat;
import java.io.IOException;
import java.util.Set;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/** Check the status of a snapshot, and if complete, trigger loading it into BigQuery. */
public class CheckSnapshotServlet extends HttpServlet {
  /** Names of the parameters used when invoking this servlet. */
static final String SNAPSHOT_NAME_PARAM = "name";
static final String SNAPSHOT_KINDS_TO_LOAD_PARAM = "kindsToLoad";
/** Servlet-specific details needed for enqueuing tasks against itself. */
static final String QUEUE = "export-snapshot-poll"; // See queue.xml.
static final String PATH = "/_dr/task/checkSnapshot"; // See web.xml.
static final Duration POLL_COUNTDOWN = Duration.standardMinutes(2);
/** The maximum amount of time we allow a backup to run before abandoning it. */
static final Duration MAXIMUM_BACKUP_RUNNING_TIME = Duration.standardHours(20);
private static final FormattingLogger logger = getLoggerForCallerClass();
@NonFinalForTesting
private static DatastoreBackupService backupService = DatastoreBackupService.get();
@Override
public void service(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
try {
rsp.setStatus(SC_OK);
rsp.setContentType(MediaType.PLAIN_TEXT_UTF_8.toString());
rsp.getWriter().write("OK\n\n");
super.service(req, rsp);
} catch (Throwable e) {
logger.severe(e, e.toString());
rsp.sendError(
e instanceof IllegalArgumentException ? SC_BAD_REQUEST : SC_INTERNAL_SERVER_ERROR,
htmlEscaper().escape(firstNonNull(e.getMessage(), e.toString())));
}
}
@Override
public void doGet(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
// TODO(b/28266757): Remove this try/catch/rethrow block once this servlet is Daggerized.
try {
String snapshotName = extractRequiredParameter(req, SNAPSHOT_NAME_PARAM);
rsp.getWriter().write(backupService.findByName(snapshotName).getInformation());
} catch (BadRequestException e) {
throw new IllegalArgumentException(e.getMessage());
}
}
@Override
public void doPost(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
String snapshotName;
String kindsToLoadParam;
// TODO(b/28266757): Remove this try/catch/rethrow block once this servlet is Daggerized.
try {
snapshotName = extractRequiredParameter(req, SNAPSHOT_NAME_PARAM);
kindsToLoadParam = extractRequiredParameter(req, SNAPSHOT_KINDS_TO_LOAD_PARAM);
} catch (BadRequestException e) {
throw new IllegalArgumentException(e.getMessage());
}
Set<String> kindsToLoad = ImmutableSet.copyOf(Splitter.on(',').split(kindsToLoadParam));
// Look up the backup by the provided name, stopping if we can't find it.
DatastoreBackupInfo backup;
try {
backup = backupService.findByName(snapshotName);
} catch (IllegalArgumentException e) {
String message = String.format("Bad backup name %s: %s", snapshotName, e.getMessage());
logger.severe(e, message);
// TODO(b/19081569): Ideally this would return a 2XX error so the task would not be retried,
// but we might abandon backups that start late and haven't yet written to datastore.
// We could fix that by replacing this with a two-phase polling strategy.
rsp.sendError(SC_BAD_REQUEST, htmlEscaper().escape(message));
return;
}
// Stop now if the backup is not complete.
if (!backup.getStatus().equals(BackupStatus.COMPLETE)) {
Duration runningTime = backup.getRunningTime();
if (runningTime.isShorterThan(MAXIMUM_BACKUP_RUNNING_TIME)) {
// Backup might still be running, so send a 304 to have the task retry.
rsp.sendError(SC_NOT_MODIFIED,
htmlEscaper().escape(String.format("Datastore backup %s still pending", snapshotName)));
} else {
// Declare the backup a lost cause, and send 202 Accepted so the task will not be retried.
String message = String.format("Datastore backup %s abandoned - not complete after %s",
snapshotName,
PeriodFormat.getDefault().print(
runningTime.toPeriod().normalizedStandard(
PeriodType.dayTime().withMillisRemoved())));
logger.severe(message);
rsp.sendError(SC_ACCEPTED, htmlEscaper().escape(message));
}
return;
}
// Get a compact string to identify this snapshot in BigQuery by trying to parse the unique
// suffix out of the snapshot name and falling back to the start time as a string.
String snapshotId = snapshotName.startsWith(ExportSnapshotServlet.SNAPSHOT_PREFIX)
? snapshotName.substring(ExportSnapshotServlet.SNAPSHOT_PREFIX.length())
: backup.getStartTime().toString("YYYYMMdd_HHmmss");
// Log a warning if kindsToLoad is not a subset of the exported snapshot kinds.
if (!backup.getKinds().containsAll(kindsToLoad)) {
logger.warningfmt(
"Kinds to load included non-exported kinds: %s",
Sets.difference(kindsToLoad, backup.getKinds()));
}
// Load kinds from the snapshot, limited to those also in kindsToLoad (if it's present).
ImmutableSet<String> exportedKindsToLoad =
ImmutableSet.copyOf(intersection(backup.getKinds(), kindsToLoad));
String message = String.format("Datastore backup %s complete - ", snapshotName);
if (exportedKindsToLoad.isEmpty()) {
message += "no kinds to load into BigQuery";
} else {
enqueueLoadSnapshotTask(snapshotId, backup.getGcsFilename().get(), exportedKindsToLoad);
message += "BigQuery load task enqueued";
}
logger.info(message);
rsp.getWriter().write(message);
}
/** Enqueue a poll task to monitor the named snapshot for completion. */
TaskHandle enqueuePollTask(String snapshotName, ImmutableSet<String> kindsToLoad) {
return QueueFactory.getQueue(QUEUE).add(
TaskOptions.Builder.withUrl(PATH)
.method(Method.POST)
.countdownMillis(POLL_COUNTDOWN.getMillis())
.param(SNAPSHOT_NAME_PARAM, snapshotName)
.param(SNAPSHOT_KINDS_TO_LOAD_PARAM, Joiner.on(',').join(kindsToLoad)));
}
}

View file

@ -1,150 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.joda.time.DateTimeZone.UTC;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.Text;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.domain.registry.util.SystemClock;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.util.Date;
import java.util.List;
/** Container for information about a datastore backup. */
public class DatastoreBackupInfo {
@NonFinalForTesting
private static Clock clock = new SystemClock();
/** The possible status values for a datastore backup. */
public enum BackupStatus { PENDING, COMPLETE }
/** The name of the datastore backup. */
private final String backupName;
/** The entity kinds included in this datastore backup. */
private final ImmutableSet<String> kinds;
/** The start time of the datastore backup. */
private final DateTime startTime;
/** The completion time of the datastore backup, present if it has completed. */
private final Optional<DateTime> completeTime;
/**
* The GCS filename to which the backup's top-level .backup_info manifest file has been written,
* present if the backup has completed.
*/
private final Optional<String> gcsFilename;
/** DatastoreBackupInfo instances should only be obtained via DatastoreBackupService. */
DatastoreBackupInfo(Entity backupEntity) {
backupName = (String) checkNotNull(backupEntity.getProperty("name"), "name");
@SuppressWarnings("unchecked")
List<String> rawKinds = (List<String>) checkNotNull(backupEntity.getProperty("kinds"), "kinds");
Date rawStartTime = (Date) checkNotNull(backupEntity.getProperty("start_time"), "start_time");
Date rawCompleteTime = (Date) backupEntity.getProperty("complete_time");
Text rawGcsFilename = (Text) backupEntity.getProperty("gs_handle");
kinds = ImmutableSet.copyOf(rawKinds);
startTime = new DateTime(rawStartTime).withZone(UTC);
completeTime = Optional.fromNullable(
rawCompleteTime == null ? null : new DateTime(rawCompleteTime).withZone(UTC));
gcsFilename = Optional.fromNullable(
rawGcsFilename == null ? null : gcsPathToUri(rawGcsFilename.getValue()));
}
/** This constructor is only exposed for test purposes. */
@VisibleForTesting
DatastoreBackupInfo(
String backupName,
DateTime startTime,
Optional<DateTime> completeTime,
ImmutableSet<String> kinds,
Optional<String> gcsFilename) {
this.backupName = backupName;
this.startTime = startTime;
this.completeTime = completeTime;
this.kinds = kinds;
this.gcsFilename = gcsFilename;
}
/**
* Rewrite a GCS path as stored by Datastore Admin (with a "/gs/" prefix) to the more standard
* URI format that uses a "gs://" scheme prefix.
*/
private static String gcsPathToUri(String backupGcsPath) {
checkArgument(backupGcsPath.startsWith("/gs/"), "GCS path not in expected format");
return backupGcsPath.replaceFirst("/gs/", "gs://");
}
public String getName() {
return backupName;
}
public ImmutableSet<String> getKinds() {
return kinds;
}
public BackupStatus getStatus() {
return completeTime.isPresent() ? BackupStatus.COMPLETE : BackupStatus.PENDING;
}
public DateTime getStartTime() {
return startTime;
}
public Optional<DateTime> getCompleteTime() {
return completeTime;
}
/**
* Returns the length of time the backup ran for (if completed) or the length of time since the
* backup started (if it has not completed).
*/
public Duration getRunningTime() {
return new Duration(startTime, completeTime.or(clock.nowUtc()));
}
public Optional<String> getGcsFilename() {
return gcsFilename;
}
/** Returns a string version of key information about the backup. */
public String getInformation() {
return Joiner.on('\n')
.join(
"Backup name: " + backupName,
"Status: " + getStatus(),
"Started: " + startTime,
"Ended: " + completeTime.orNull(),
"Duration: " + getRunningTime().toPeriod().toString().substring(2).toLowerCase(),
"GCS: " + gcsFilename.orNull(),
"Kinds: " + kinds,
"");
}
}

View file

@ -1,122 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.api.datastore.DatastoreServiceFactory.getDatastoreService;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.Strings.nullToEmpty;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.Query;
import com.google.appengine.api.modules.ModulesService;
import com.google.appengine.api.modules.ModulesServiceFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.domain.registry.util.NonFinalForTesting;
import java.util.NoSuchElementException;
/** An object providing methods for starting and querying datastore backups. */
public class DatastoreBackupService {
/** The internal kind name used for entities storing information about datastore backups. */
static final String BACKUP_INFO_KIND = "_AE_Backup_Information";
/** The name of the app version used for hosting the Datastore Admin functionality. */
static final String DATASTORE_ADMIN_VERSION_NAME = "ah-builtin-python-bundle";
@NonFinalForTesting
private static ModulesService modulesService = ModulesServiceFactory.getModulesService();
/**
* Returns an instance of this service.
*
* <p>This method exists to allow for making the service a singleton object if desired at some
* future point; the choice is meaningless right now because the service maintains no state.
   * That means its client-facing methods could in theory be static, but they are left as
   * instance methods because static methods are difficult to mock in clients.
*/
public static DatastoreBackupService get() {
return new DatastoreBackupService();
}
/**
* Generates the TaskOptions needed to trigger an AppEngine datastore backup job.
*
* @see "https://developers.google.com/appengine/articles/scheduled_backups"
*/
private static TaskOptions makeTaskOptions(
String queue, String name, String gcsBucket, ImmutableSet<String> kinds) {
String hostname = modulesService.getVersionHostname("default", DATASTORE_ADMIN_VERSION_NAME);
TaskOptions options = TaskOptions.Builder.withUrl("/_ah/datastore_admin/backup.create")
.header("Host", hostname)
.method(Method.GET)
.param("name", name + "_") // Add underscore since the name will be used as a prefix.
.param("filesystem", "gs")
.param("gs_bucket_name", gcsBucket)
.param("queue", queue);
for (String kind : kinds) {
options.param("kind", kind);
}
return options;
}
/**
* Launches a new datastore backup with the given name, GCS bucket, and set of kinds by
* submitting a task to the given task queue, and returns a handle to that task.
*/
public TaskHandle launchNewBackup(
String queue, String name, String gcsBucket, ImmutableSet<String> kinds) {
return getQueue(queue).add(makeTaskOptions(queue, name, gcsBucket, kinds));
}
  /** Returns an iterable of all datastore backups whose names have the given string prefix. */
public Iterable<DatastoreBackupInfo> findAllByNamePrefix(final String namePrefix) {
// Need the raw DatastoreService to access the internal _AE_Backup_Information entities.
// TODO(b/19081037): make an Objectify entity class for these raw datastore entities instead.
return FluentIterable
.from(getDatastoreService().prepare(new Query(BACKUP_INFO_KIND)).asIterable())
.filter(new Predicate<Entity>() {
@Override
public boolean apply(Entity entity) {
return nullToEmpty((String) entity.getProperty("name")).startsWith(namePrefix);
}})
.transform(new Function<Entity, DatastoreBackupInfo>() {
@Override
public DatastoreBackupInfo apply(Entity entity) {
return new DatastoreBackupInfo(entity);
}});
}
/**
   * Returns the single DatastoreBackupInfo that uniquely matches this name prefix. Throws an
   * IllegalArgumentException if no backups match or if more than one backup matches.
*/
public DatastoreBackupInfo findByName(final String namePrefix) {
try {
return Iterables.getOnlyElement(findAllByNamePrefix(namePrefix));
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("More than one backup with name prefix " + namePrefix, e);
} catch (NoSuchElementException e) {
throw new IllegalArgumentException("No backup found with name prefix " + namePrefix, e);
}
}
}
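
// (Aside: an illustrative invocation of launchNewBackup, with placeholder bucket and kind
// names, using the export-snapshot queue defined in queue.xml above. Not part of this change.)
//
//   DatastoreBackupService.get().launchNewBackup(
//       "export-snapshot",            // task queue for the backup.create request
//       "auto_snapshot",              // backup name prefix (assumed, not from this change)
//       "my-project-snapshots",       // placeholder GCS bucket
//       ImmutableSet.of("Registrar", "ContactResource"));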

View file

@ -1,61 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static dagger.Provides.Type.SET_VALUES;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.services.drive.Drive;
import com.google.api.services.drive.DriveScopes;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.request.OAuthScopes;
import dagger.Module;
import dagger.Provides;
import java.util.Set;
/**
* Dagger module for Google {@link Drive} service connection objects.
*
* @see com.google.domain.registry.config.ConfigModule
* @see com.google.domain.registry.request.Modules.UrlFetchTransportModule
* @see com.google.domain.registry.request.Modules.Jackson2Module
* @see com.google.domain.registry.request.Modules.AppIdentityCredentialModule
* @see com.google.domain.registry.request.Modules.UseAppIdentityCredentialForGoogleApisModule
*/
@Module
public final class DriveModule {
/** Provides OAuth2 scopes for the Drive service needed by Domain Registry. */
@Provides(type = SET_VALUES)
@OAuthScopes
static Set<String> provideDriveOAuthScopes() {
return DriveScopes.all();
}
@Provides
static Drive provideDrive(
HttpTransport transport,
JsonFactory jsonFactory,
HttpRequestInitializer httpRequestInitializer,
@Config("projectId") String projectId) {
return new Drive.Builder(transport, jsonFactory, httpRequestInitializer)
.setApplicationName(projectId)
.build();
}
}
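A hedged sketch of how a Dagger component might consume this module. The component and FakeDriveDepsModule are invented stand-ins for the companion modules named in the javadoc above; in production those modules supply the transport, JSON factory, credential, and project id bindings:

import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.drive.Drive;
import com.google.domain.registry.config.ConfigModule.Config;
import dagger.Component;
import dagger.Module;
import dagger.Provides;

/** Hypothetical stand-in for the transport/JSON/credential/config modules. */
@Module
class FakeDriveDepsModule {
  @Provides static HttpTransport provideTransport() { return new NetHttpTransport(); }
  @Provides static JsonFactory provideJsonFactory() { return new JacksonFactory(); }
  @Provides static HttpRequestInitializer provideInitializer() { return request -> {}; }
  @Provides @Config("projectId") static String provideProjectId() { return "my-project"; }
}

@Component(modules = {DriveModule.class, FakeDriveDepsModule.class})
interface DriveComponent {
  Drive drive();  // Fully configured Drive client, named after the project id.
}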

View file

@ -1,96 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.Predicates.not;
import static com.google.domain.registry.model.EntityClasses.CLASS_TO_KIND_FUNCTION;
import static com.google.domain.registry.util.TypeUtils.hasAnnotation;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Ordering;
import com.google.domain.registry.model.EntityClasses;
import com.google.domain.registry.model.ImmutableObject;
import com.google.domain.registry.model.annotations.NotBackedUp;
import com.google.domain.registry.model.annotations.VirtualEntity;
import com.google.domain.registry.model.billing.BillingEvent.Cancellation;
import com.google.domain.registry.model.billing.BillingEvent.Modification;
import com.google.domain.registry.model.billing.BillingEvent.OneTime;
import com.google.domain.registry.model.billing.BillingEvent.Recurring;
import com.google.domain.registry.model.billing.RegistrarCredit;
import com.google.domain.registry.model.billing.RegistrarCreditBalance;
import com.google.domain.registry.model.contact.ContactResource;
import com.google.domain.registry.model.domain.DomainBase;
import com.google.domain.registry.model.host.HostResource;
import com.google.domain.registry.model.index.DomainApplicationIndex;
import com.google.domain.registry.model.index.EppResourceIndex;
import com.google.domain.registry.model.index.ForeignKeyIndex.ForeignKeyContactIndex;
import com.google.domain.registry.model.index.ForeignKeyIndex.ForeignKeyDomainIndex;
import com.google.domain.registry.model.index.ForeignKeyIndex.ForeignKeyHostIndex;
import com.google.domain.registry.model.registrar.Registrar;
import com.google.domain.registry.model.registrar.RegistrarContact;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.model.registry.label.PremiumList;
import com.google.domain.registry.model.registry.label.PremiumList.PremiumListEntry;
import com.google.domain.registry.model.reporting.HistoryEntry;
/** Constants related to export code. */
public final class ExportConstants {
/** Set of entity classes to export into BigQuery for reporting purposes. */
@VisibleForTesting
@SuppressWarnings("unchecked") // varargs
static final ImmutableSet<Class<? extends ImmutableObject>> REPORTING_ENTITY_CLASSES =
ImmutableSet.of(
Cancellation.class,
ContactResource.class,
DomainApplicationIndex.class,
DomainBase.class,
EppResourceIndex.class,
ForeignKeyContactIndex.class,
ForeignKeyDomainIndex.class,
ForeignKeyHostIndex.class,
HistoryEntry.class,
HostResource.class,
Modification.class,
OneTime.class,
PremiumList.class,
PremiumListEntry.class,
Recurring.class,
Registrar.class,
RegistrarContact.class,
RegistrarCredit.class,
RegistrarCreditBalance.class,
Registry.class);
/** Returns the names of kinds to include in datastore backups. */
public static ImmutableSet<String> getBackupKinds() {
// Back up all entity classes that aren't annotated with @VirtualEntity (never even persisted
// to datastore, so they can't be backed up) or @NotBackedUp (intentionally omitted).
return FluentIterable.from(EntityClasses.ALL_CLASSES)
.filter(not(hasAnnotation(VirtualEntity.class)))
.filter(not(hasAnnotation(NotBackedUp.class)))
.transform(CLASS_TO_KIND_FUNCTION)
.toSortedSet(Ordering.natural());
}
/** Returns the names of kinds to import into reporting tools (e.g. BigQuery). */
public static ImmutableSet<String> getReportingKinds() {
return FluentIterable.from(REPORTING_ENTITY_CLASSES)
.transform(CLASS_TO_KIND_FUNCTION)
.toSortedSet(Ordering.natural());
}
}
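A self-contained illustration of the filtering idea behind getBackupKinds(): keep only classes that lack a marker annotation, then map each class to a kind name. The annotation and classes here are hypothetical stand-ins for @NotBackedUp/@VirtualEntity and EntityClasses.ALL_CLASSES:

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BackupKindFilterExample {
  @Retention(RetentionPolicy.RUNTIME)
  @interface NotBackedUp {}

  static class DomainBase {}         // backed up
  @NotBackedUp static class Lock {}  // intentionally omitted

  public static void main(String[] args) {
    List<Class<?>> all = Arrays.asList(DomainBase.class, Lock.class);
    List<String> kinds = new ArrayList<>();
    for (Class<?> clazz : all) {
      if (!clazz.isAnnotationPresent(NotBackedUp.class)) {
        kinds.add(clazz.getSimpleName());  // the real code maps class -> datastore kind
      }
    }
    System.out.println(kinds);  // [DomainBase]
  }
}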

View file

@ -1,137 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.tools.cloudstorage.GcsServiceFactory.createGcsService;
import static com.google.domain.registry.mapreduce.inputs.EppResourceInputs.createEntityInput;
import static com.google.domain.registry.model.EppResourceUtils.isActive;
import static com.google.domain.registry.model.registry.Registries.getTldsOfType;
import static com.google.domain.registry.util.PipelineUtils.createJobPath;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.joda.time.DateTimeZone.UTC;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.RetryParams;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.gcs.GcsUtils;
import com.google.domain.registry.mapreduce.MapreduceAction;
import com.google.domain.registry.mapreduce.MapreduceRunner;
import com.google.domain.registry.model.domain.DomainResource;
import com.google.domain.registry.model.registry.Registry.TldType;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Response;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.DateTime;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import javax.inject.Inject;
/**
* A mapreduce that exports the list of active domains on all real TLDs to Google Cloud Storage.
*
 * <p>Each TLD's active domain names are exported as a newline-delimited flat text file with the
 * name TLD.txt into the domain-lists bucket. Note that this overwrites the files in place.
*/
@Action(path = "/_dr/task/exportDomainLists")
public class ExportDomainListsAction implements MapreduceAction {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
private static final int MAX_NUM_REDUCE_SHARDS = 100;
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject @Config("domainListsGcsBucket") String gcsBucket;
@Inject @Config("gcsBufferSize") int gcsBufferSize;
@Inject ExportDomainListsAction() {}
@Override
public void run() {
ImmutableSet<String> realTlds = getTldsOfType(TldType.REAL);
logger.infofmt("Exporting domain lists for tlds %s", realTlds);
response.sendJavaScriptRedirect(createJobPath(mrRunner
.setJobName("Export domain lists")
.setModuleName("backend")
.setDefaultReduceShards(Math.min(realTlds.size(), MAX_NUM_REDUCE_SHARDS))
.runMapreduce(
new ExportDomainListsMapper(DateTime.now(UTC), realTlds),
new ExportDomainListsReducer(gcsBucket, gcsBufferSize),
ImmutableList.of(createEntityInput(DomainResource.class)))));
}
static class ExportDomainListsMapper extends Mapper<DomainResource, String, String> {
private static final long serialVersionUID = -7312206212434039854L;
private final DateTime exportTime;
private final ImmutableSet<String> realTlds;
ExportDomainListsMapper(DateTime exportTime, ImmutableSet<String> realTlds) {
this.exportTime = exportTime;
this.realTlds = realTlds;
}
@Override
public void map(DomainResource domain) {
if (realTlds.contains(domain.getTld()) && isActive(domain, exportTime)) {
emit(domain.getTld(), domain.getFullyQualifiedDomainName());
getContext().incrementCounter(String.format("domains in tld %s", domain.getTld()));
}
}
}
static class ExportDomainListsReducer extends Reducer<String, String, Void> {
private static final long serialVersionUID = 7035260977259119087L;
private final String gcsBucket;
private final int gcsBufferSize;
public ExportDomainListsReducer(String gcsBucket, int gcsBufferSize) {
this.gcsBucket = gcsBucket;
this.gcsBufferSize = gcsBufferSize;
}
@Override
public void reduce(String tld, ReducerInput<String> fqdns) {
GcsFilename filename = new GcsFilename(gcsBucket, tld + ".txt");
GcsUtils cloudStorage =
new GcsUtils(createGcsService(RetryParams.getDefaultInstance()), gcsBufferSize);
try (OutputStream gcsOutput = cloudStorage.openOutputStream(filename);
Writer osWriter = new OutputStreamWriter(gcsOutput, UTF_8);
PrintWriter writer = new PrintWriter(osWriter)) {
long count;
for (count = 0; fqdns.hasNext(); count++) {
writer.println(fqdns.next());
}
writer.flush();
getContext().incrementCounter("tld domain lists written out");
logger.infofmt("Wrote out %d domains for tld %s.", count, tld);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}
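A hedged consumer-side sketch that reads back one of the exported lists, using the same GcsUtils/GcsFilename calls this mapreduce uses to write them; it would only run inside the App Engine runtime, and the bucket name and buffer size are placeholders:

import static com.google.appengine.tools.cloudstorage.GcsServiceFactory.createGcsService;
import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.RetryParams;
import com.google.common.io.CharStreams;
import com.google.domain.registry.gcs.GcsUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class ReadDomainListExample {
  public static void main(String[] args) throws IOException {
    GcsUtils gcsUtils = new GcsUtils(createGcsService(RetryParams.getDefaultInstance()), 1024);
    GcsFilename file = new GcsFilename("domain-lists-bucket", "tld.txt");  // hypothetical bucket
    try (InputStream in = gcsUtils.openInputStream(file)) {
      // Print the newline-delimited active domain names for this TLD.
      System.out.print(CharStreams.toString(new InputStreamReader(in, UTF_8)));
    }
  }
}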

View file

@ -1,94 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.domain.registry.export.BigqueryPollJobAction.CHAINED_TASK_QUEUE_HEADER;
import static com.google.domain.registry.export.BigqueryPollJobAction.JOB_ID_HEADER;
import static com.google.domain.registry.export.BigqueryPollJobAction.PROJECT_ID_HEADER;
import static com.google.domain.registry.export.LoadSnapshotAction.LOAD_SNAPSHOT_FILE_PARAM;
import static com.google.domain.registry.export.LoadSnapshotAction.LOAD_SNAPSHOT_ID_PARAM;
import static com.google.domain.registry.export.LoadSnapshotAction.LOAD_SNAPSHOT_KINDS_PARAM;
import static com.google.domain.registry.export.UpdateSnapshotViewAction.UPDATE_SNAPSHOT_DATASET_ID_PARAM;
import static com.google.domain.registry.export.UpdateSnapshotViewAction.UPDATE_SNAPSHOT_KIND_PARAM;
import static com.google.domain.registry.export.UpdateSnapshotViewAction.UPDATE_SNAPSHOT_TABLE_ID_PARAM;
import static com.google.domain.registry.request.RequestParameters.extractRequiredHeader;
import static com.google.domain.registry.request.RequestParameters.extractRequiredParameter;
import com.google.domain.registry.request.Header;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for data export tasks. */
@Module
public final class ExportRequestModule {
@Provides
@Parameter(UPDATE_SNAPSHOT_DATASET_ID_PARAM)
static String provideUpdateSnapshotDatasetId(HttpServletRequest req) {
return extractRequiredParameter(req, UPDATE_SNAPSHOT_DATASET_ID_PARAM);
}
@Provides
@Parameter(UPDATE_SNAPSHOT_TABLE_ID_PARAM)
static String provideUpdateSnapshotTableId(HttpServletRequest req) {
return extractRequiredParameter(req, UPDATE_SNAPSHOT_TABLE_ID_PARAM);
}
@Provides
@Parameter(UPDATE_SNAPSHOT_KIND_PARAM)
static String provideUpdateSnapshotKind(HttpServletRequest req) {
return extractRequiredParameter(req, UPDATE_SNAPSHOT_KIND_PARAM);
}
@Provides
@Parameter(LOAD_SNAPSHOT_FILE_PARAM)
static String provideLoadSnapshotFile(HttpServletRequest req) {
return extractRequiredParameter(req, LOAD_SNAPSHOT_FILE_PARAM);
}
@Provides
@Parameter(LOAD_SNAPSHOT_ID_PARAM)
static String provideLoadSnapshotId(HttpServletRequest req) {
return extractRequiredParameter(req, LOAD_SNAPSHOT_ID_PARAM);
}
@Provides
@Parameter(LOAD_SNAPSHOT_KINDS_PARAM)
static String provideLoadSnapshotKinds(HttpServletRequest req) {
return extractRequiredParameter(req, LOAD_SNAPSHOT_KINDS_PARAM);
}
@Provides
@Header(CHAINED_TASK_QUEUE_HEADER)
static String provideChainedTaskQueue(HttpServletRequest req) {
return extractRequiredHeader(req, CHAINED_TASK_QUEUE_HEADER);
}
@Provides
@Header(JOB_ID_HEADER)
static String provideJobId(HttpServletRequest req) {
return extractRequiredHeader(req, JOB_ID_HEADER);
}
@Provides
@Header(PROJECT_ID_HEADER)
static String provideProjectId(HttpServletRequest req) {
return extractRequiredHeader(req, PROJECT_ID_HEADER);
}
}

View file

@ -1,85 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.net.MediaType.PLAIN_TEXT_UTF_8;
import static com.google.domain.registry.export.ExportUtils.exportReservedTerms;
import static com.google.domain.registry.request.Action.Method.POST;
import static java.nio.charset.StandardCharsets.UTF_8;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.common.net.MediaType;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.RequestParameters;
import com.google.domain.registry.request.Response;
import com.google.domain.registry.storage.drive.DriveConnection;
import com.google.domain.registry.util.FormattingLogger;
import javax.inject.Inject;
/** Action that exports the publicly viewable reserved terms list for a TLD to Google Drive. */
@Action(path = "/_dr/task/exportReservedTerms", method = POST)
public class ExportReservedTermsAction implements Runnable {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
static final MediaType EXPORT_MIME_TYPE = MediaType.PLAIN_TEXT_UTF_8;
static final String RESERVED_TERMS_FILENAME = "reserved_terms.txt";
@Inject DriveConnection driveConnection;
@Inject @Parameter(RequestParameters.PARAM_TLD) String tld;
@Inject Response response;
@Inject ExportReservedTermsAction() {}
/**
* Exports the reserved terms for the TLD specified via the "tld" param to a newline-delimited
* UTF-8-formatted CSV file (with one column) named "reserved_terms.txt" in the Google Drive
* folder with the id specified for that TLD.
*
   * <p>This action prints the ID of the file in Google Drive that was created or updated.
*/
@Override
public void run() {
response.setContentType(PLAIN_TEXT_UTF_8);
try {
Registry registry = Registry.get(tld);
String resultMsg;
if (registry.getReservedLists().isEmpty() && isNullOrEmpty(registry.getDriveFolderId())) {
resultMsg = "No reserved lists configured";
logger.infofmt("No reserved terms to export for TLD %s", tld);
} else {
checkNotNull(registry.getDriveFolderId(), "No drive folder associated with this TLD");
resultMsg = driveConnection.createOrUpdateFile(
RESERVED_TERMS_FILENAME,
EXPORT_MIME_TYPE,
registry.getDriveFolderId(),
exportReservedTerms(registry).getBytes(UTF_8));
logger.infofmt("Exporting reserved terms succeeded for TLD %s, response was: %s",
tld, resultMsg);
}
response.setStatus(SC_OK);
response.setPayload(resultMsg);
} catch (Throwable e) {
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setPayload(e.getMessage());
throw new RuntimeException(
String.format("Exception occurred while exporting reserved terms for TLD %s.", tld), e);
}
}
}

View file

@ -1,92 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.html.HtmlEscapers.htmlEscaper;
import static javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.common.net.MediaType;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.domain.registry.util.SystemClock;
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Trigger a backup-as-a-service job that writes a snapshot to Google Cloud Storage.
*
* <p>This is the first step of a four step workflow for exporting snapshots, with each step calling
* the next upon successful completion:<ol>
* <li>The snapshot is exported to Google Cloud Storage (this servlet).
* <li>The {@link CheckSnapshotServlet} polls until the export is completed.
* <li>The {@link LoadSnapshotAction} imports the data from GCS to BigQuery.
* <li>The {@link UpdateSnapshotViewAction} updates the view in latest_snapshot.
* </ol>
*/
public class ExportSnapshotServlet extends HttpServlet {
private static final RegistryEnvironment ENVIRONMENT = RegistryEnvironment.get();
/** Queue to use for enqueuing the task that will actually launch the backup. */
static final String QUEUE = "export-snapshot"; // See queue.xml.
/** Prefix to use for naming all snapshots that are started by this servlet. */
static final String SNAPSHOT_PREFIX = "auto_snapshot_";
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@NonFinalForTesting
private static Clock clock = new SystemClock();
@NonFinalForTesting
private static DatastoreBackupService backupService = DatastoreBackupService.get();
@NonFinalForTesting
private static CheckSnapshotServlet checkSnapshotServlet = new CheckSnapshotServlet();
@Override
public void doPost(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
try {
// Use a unique name for the snapshot so we can explicitly check its completion later.
String snapshotName = SNAPSHOT_PREFIX + clock.nowUtc().toString("YYYYMMdd_HHmmss");
backupService.launchNewBackup(
QUEUE,
snapshotName,
ENVIRONMENT.config().getSnapshotsBucket(),
ExportConstants.getBackupKinds());
// Enqueue a poll task to monitor the backup and load reporting-related kinds into bigquery.
checkSnapshotServlet.enqueuePollTask(snapshotName, ExportConstants.getReportingKinds());
String message = "Datastore backup started with name: " + snapshotName;
logger.info(message);
rsp.setStatus(SC_OK);
rsp.setContentType(MediaType.PLAIN_TEXT_UTF_8.toString());
rsp.getWriter().write("OK\n\n" + message);
} catch (Throwable e) {
logger.severe(e, e.toString());
rsp.sendError(
e instanceof IllegalArgumentException ? SC_BAD_REQUEST : SC_INTERNAL_SERVER_ERROR,
htmlEscaper().escape(firstNonNull(e.getMessage(), e.toString())));
}
}
}
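A minimal sketch of the snapshot-naming scheme above, using Joda-Time as this codebase does; the fixed timestamp is just for illustration:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class SnapshotNameExample {
  public static void main(String[] args) {
    DateTime now = new DateTime(2016, 5, 13, 18, 55, 8, DateTimeZone.UTC);
    // Unique name, so the backup's completion can be checked explicitly later.
    String snapshotName = "auto_snapshot_" + now.toString("YYYYMMdd_HHmmss");
    System.out.println(snapshotName);  // auto_snapshot_20160513_185508
  }
}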

View file

@ -1,53 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.domain.registry.model.registry.label.ReservationType.UNRESERVED;
import com.google.common.base.Joiner;
import com.google.domain.registry.config.RegistryEnvironment;
import com.google.domain.registry.model.registry.Registry;
import com.google.domain.registry.model.registry.label.ReservedList;
import com.google.domain.registry.model.registry.label.ReservedList.ReservedListEntry;
import com.googlecode.objectify.Key;
import java.util.Set;
import java.util.TreeSet;
/** Container class for export-related static utility methods. */
public class ExportUtils {
private ExportUtils() {}
/** Returns the file contents of the auto-export reserved terms document for the given TLD. */
public static String exportReservedTerms(Registry registry) {
StringBuilder termsBuilder =
new StringBuilder(RegistryEnvironment.get().config().getReservedTermsExportDisclaimer());
Set<String> reservedTerms = new TreeSet<>();
for (Key<ReservedList> key : registry.getReservedLists()) {
ReservedList reservedList = ReservedList.load(key).get();
if (reservedList.getShouldPublish()) {
for (ReservedListEntry entry : reservedList.getReservedListEntries().values()) {
if (entry.getValue() != UNRESERVED) {
reservedTerms.add(entry.getLabel());
}
}
}
}
Joiner.on("\n").appendTo(termsBuilder, reservedTerms);
return termsBuilder.append("\n").toString();
}
}
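A self-contained illustration of the dedup-and-sort core of exportReservedTerms(): terms gathered from overlapping reserved lists collapse into one sorted, newline-joined file body. The header line is a placeholder for the configured export disclaimer:

import com.google.common.base.Joiner;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class ReservedTermsFormatExample {
  public static void main(String[] args) {
    StringBuilder termsBuilder = new StringBuilder("# Disclaimer header would go here\n");
    Set<String> reservedTerms = new TreeSet<>();  // sorted, duplicates collapsed
    reservedTerms.addAll(Arrays.asList("foo", "bar", "foo", "baz"));
    Joiner.on("\n").appendTo(termsBuilder, reservedTerms);
    System.out.println(termsBuilder.append("\n").toString());
    // # Disclaimer header would go here
    // bar
    // baz
    // foo
  }
}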

View file

@ -1,163 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.domain.registry.export.UpdateSnapshotViewAction.createViewUpdateTask;
import static com.google.domain.registry.request.Action.Method.POST;
import static com.google.domain.registry.util.FormattingLogger.getLoggerForCallerClass;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobConfiguration;
import com.google.api.services.bigquery.model.JobConfigurationLoad;
import com.google.api.services.bigquery.model.JobReference;
import com.google.api.services.bigquery.model.TableReference;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.domain.registry.bigquery.BigqueryFactory;
import com.google.domain.registry.bigquery.BigqueryUtils.SourceFormat;
import com.google.domain.registry.bigquery.BigqueryUtils.WriteDisposition;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.export.BigqueryPollJobAction.BigqueryPollJobEnqueuer;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.InternalServerErrorException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.FormattingLogger;
import org.joda.time.DateTime;
import java.io.IOException;
import javax.inject.Inject;
/** Action to load a Datastore snapshot from Google Cloud Storage into BigQuery. */
@Action(path = LoadSnapshotAction.PATH, method = POST)
public class LoadSnapshotAction implements Runnable {
/** Parameter names for passing parameters into the servlet. */
static final String LOAD_SNAPSHOT_ID_PARAM = "id";
static final String LOAD_SNAPSHOT_FILE_PARAM = "file";
static final String LOAD_SNAPSHOT_KINDS_PARAM = "kinds";
static final String SNAPSHOTS_DATASET = "snapshots";
/** Servlet-specific details needed for enqueuing tasks against itself. */
static final String QUEUE = "export-snapshot"; // See queue.xml.
static final String PATH = "/_dr/task/loadSnapshot"; // See web.xml.
private static final FormattingLogger logger = getLoggerForCallerClass();
@Inject BigqueryFactory bigqueryFactory;
@Inject BigqueryPollJobEnqueuer bigqueryPollEnqueuer;
@Inject Clock clock;
@Inject @Config("projectId") String projectId;
@Inject @Parameter(LOAD_SNAPSHOT_FILE_PARAM) String snapshotFile;
@Inject @Parameter(LOAD_SNAPSHOT_ID_PARAM) String snapshotId;
@Inject @Parameter(LOAD_SNAPSHOT_KINDS_PARAM) String snapshotKinds;
@Inject LoadSnapshotAction() {}
/** Enqueue a task for starting a backup load. */
public static TaskHandle enqueueLoadSnapshotTask(
String snapshotId, String gcsFile, ImmutableSet<String> kinds) {
return getQueue(QUEUE).add(
TaskOptions.Builder.withUrl(PATH)
.method(Method.POST)
.param(LOAD_SNAPSHOT_ID_PARAM, snapshotId)
.param(LOAD_SNAPSHOT_FILE_PARAM, gcsFile)
.param(LOAD_SNAPSHOT_KINDS_PARAM, Joiner.on(',').join(kinds)));
}
@Override
public void run() {
try {
String message =
loadSnapshot(snapshotId, snapshotFile, Splitter.on(',').split(snapshotKinds));
logger.infofmt("Loaded snapshot successfully: %s", message);
} catch (Throwable e) {
logger.severe(e, "Error loading snapshot");
if (e instanceof IllegalArgumentException) {
throw new BadRequestException("Error calling load snapshot: " + e.getMessage(), e);
} else {
throw new InternalServerErrorException(
"Error loading snapshot: " + firstNonNull(e.getMessage(), e.toString()));
}
}
}
private String loadSnapshot(String snapshotId, String gcsFilename, Iterable<String> kinds)
throws IOException {
Bigquery bigquery = bigqueryFactory.create(projectId, SNAPSHOTS_DATASET);
DateTime now = clock.nowUtc();
String loadMessage =
String.format("Loading datastore snapshot %s from %s...", snapshotId, gcsFilename);
logger.info(loadMessage);
StringBuilder builder = new StringBuilder(loadMessage + "\n");
builder.append("Load jobs:\n");
for (String kindName : kinds) {
String jobId = String.format("load-snapshot-%s-%s-%d", snapshotId, kindName, now.getMillis());
JobReference jobRef = new JobReference().setProjectId(projectId).setJobId(jobId);
String sourceUri = getBackupInfoFileForKind(gcsFilename, kindName);
String tableId = String.format("%s_%s", snapshotId, kindName);
// Launch the load job.
Job job = makeLoadJob(jobRef, sourceUri, tableId);
bigquery.jobs().insert(projectId, job).execute();
// Enqueue a task to check on the load job's completion, and if it succeeds, to update a
// well-known view in BigQuery to point at the newly loaded snapshot table for this kind.
bigqueryPollEnqueuer.enqueuePollTask(
jobRef,
createViewUpdateTask(SNAPSHOTS_DATASET, tableId, kindName),
getQueue(UpdateSnapshotViewAction.QUEUE));
builder.append(String.format(" - %s:%s\n", projectId, jobId));
logger.infofmt("Submitted load job %s:%s", projectId, jobId);
}
return builder.toString();
}
private static String getBackupInfoFileForKind(String backupInfoFile, String kindName) {
String extension = ".backup_info";
checkArgument(backupInfoFile.endsWith(extension), "backup info file extension missing");
String prefix = backupInfoFile.substring(0, backupInfoFile.length() - extension.length());
return Joiner.on('.').join(prefix, kindName, extension.substring(1));
}
private Job makeLoadJob(JobReference jobRef, String sourceUri, String tableId) {
TableReference tableReference = new TableReference()
.setProjectId(jobRef.getProjectId())
.setDatasetId(SNAPSHOTS_DATASET)
.setTableId(tableId);
return new Job()
.setJobReference(jobRef)
.setConfiguration(new JobConfiguration()
.setLoad(new JobConfigurationLoad()
.setWriteDisposition(WriteDisposition.WRITE_EMPTY.toString())
.setSourceFormat(SourceFormat.DATASTORE_BACKUP.toString())
.setSourceUris(ImmutableList.of(sourceUri))
.setDestinationTable(tableReference)));
}
}
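A worked example of the filename surgery in getBackupInfoFileForKind(), with a hypothetical GCS path: the kind name is spliced in ahead of the extension, so loading kind Registrar reads the per-kind backup_info file:

import com.google.common.base.Joiner;

public class BackupInfoFileExample {
  public static void main(String[] args) {
    String backupInfoFile = "gs://bucket/auto_snapshot_20160513.backup_info";
    String kindName = "Registrar";
    String extension = ".backup_info";
    String prefix = backupInfoFile.substring(0, backupInfoFile.length() - extension.length());
    // Prints: gs://bucket/auto_snapshot_20160513.Registrar.backup_info
    System.out.println(Joiner.on('.').join(prefix, kindName, extension.substring(1)));
  }
}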

View file

@ -1,124 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.domain.registry.util.PreconditionsUtils.checkArgumentNotNull;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
import com.google.common.net.MediaType;
import com.google.domain.registry.gcs.GcsUtils;
import com.google.domain.registry.model.registrar.Registrar;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.BadRequestException;
import com.google.domain.registry.request.HttpException.InternalServerErrorException;
import com.google.domain.registry.request.JsonActionRunner;
import com.google.domain.registry.request.JsonActionRunner.JsonAction;
import com.google.domain.registry.storage.drive.DriveConnection;
import com.google.domain.registry.util.FormattingLogger;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.util.Map;
import javax.inject.Inject;
/** Publish a single registrar detail report from GCS to Drive. */
@Action(
path = PublishDetailReportAction.PATH,
method = Action.Method.POST,
xsrfProtection = true,
xsrfScope = "admin")
public final class PublishDetailReportAction implements Runnable, JsonAction {
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
/** MIME type to use for deposited report files in Drive. */
private static final MediaType REPORT_MIME_TYPE = MediaType.CSV_UTF_8;
/** Endpoint to which JSON should be sent for this servlet. See {@code web.xml}. */
public static final String PATH = "/_dr/publishDetailReport";
/** Name of parameter indicating the registrar for which this report will be published. */
public static final String REGISTRAR_ID_PARAM = "registrar";
/** Name of parameter providing a name for the report file placed in Drive (the base name). */
public static final String DETAIL_REPORT_NAME_PARAM = "report";
/**
* Name of parameter giving the prefix of the GCS object name to use as the report contents.
* Concatenating this value with the value of the "report" parameter gives the full object name.
*/
public static final String GCS_FOLDER_PREFIX_PARAM = "gcsFolder";
/** Name of parameter giving the GCS bucket name for the file to use as the report contents. */
public static final String GCS_BUCKET_PARAM = "bucket";
@Inject DriveConnection driveConnection;
@Inject GcsUtils gcsUtils;
@Inject JsonActionRunner runner;
@Inject PublishDetailReportAction() {}
@Override
public void run() {
runner.run(this);
}
/** Copy a detail report from Cloud Storage to Drive. */
@Override
public Map<String, Object> handleJsonRequest(Map<String, ?> json) {
try {
logger.infofmt("Publishing detail report for parameters: %s", json);
String registrarId = getParam(json, REGISTRAR_ID_PARAM);
Registrar registrar = checkArgumentNotNull(Registrar.loadByClientId(registrarId),
"Registrar %s not found", registrarId);
String driveFolderId = checkArgumentNotNull(registrar.getDriveFolderId(),
"No drive folder associated with registrar " + registrarId);
String gcsBucketName = getParam(json, GCS_BUCKET_PARAM);
String gcsObjectName =
getParam(json, GCS_FOLDER_PREFIX_PARAM) + getParam(json, DETAIL_REPORT_NAME_PARAM);
try (InputStream input =
gcsUtils.openInputStream(new GcsFilename(gcsBucketName, gcsObjectName))) {
String driveId =
driveConnection.createFile(
getParam(json, DETAIL_REPORT_NAME_PARAM),
REPORT_MIME_TYPE,
driveFolderId,
ByteStreams.toByteArray(input));
logger.infofmt("Published detail report for %s to folder %s using GCS file gs://%s/%s.",
registrarId,
driveFolderId,
gcsBucketName,
gcsObjectName);
return ImmutableMap.<String, Object>of("driveId", driveId);
} catch (FileNotFoundException e) {
throw new IllegalArgumentException(e.getMessage(), e);
}
} catch (Throwable e) {
logger.severe(e, e.toString());
String message = firstNonNull(e.getMessage(), e.toString());
throw e instanceof IllegalArgumentException
? new BadRequestException(message) : new InternalServerErrorException(message);
}
}
private String getParam(Map<String, ?> json, String paramName) {
return (String) checkArgumentNotNull(
json.get(paramName),
"Missing required parameter: %s", paramName);
}
}
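A hedged sketch of the JSON request body this action expects, expressed as the parsed Map; all values are hypothetical placeholders, and the final line shows how the GCS object name is assembled from them:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class PublishDetailReportRequestExample {
  public static void main(String[] args) {
    Map<String, Object> json = ImmutableMap.<String, Object>of(
        "registrar", "TheRegistrar",              // REGISTRAR_ID_PARAM
        "report", "detail_report_2016_05.csv",    // DETAIL_REPORT_NAME_PARAM
        "gcsFolder", "monthly-reports/2016-05/",  // GCS_FOLDER_PREFIX_PARAM
        "bucket", "my-reports-bucket");           // GCS_BUCKET_PARAM
    // The object read from GCS is gs://<bucket>/<gcsFolder + report>:
    System.out.println("gs://" + json.get("bucket") + "/"
        + json.get("gcsFolder") + json.get("report"));
  }
}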

View file

@ -1,237 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.domain.registry.model.ofy.ObjectifyService.ofy;
import static com.google.domain.registry.request.Action.Method.POST;
import static com.google.domain.registry.util.CollectionUtils.nullToEmpty;
import static com.google.domain.registry.util.RegistrarUtils.normalizeClientId;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.groups.GroupsConnection;
import com.google.domain.registry.groups.GroupsConnection.Role;
import com.google.domain.registry.model.registrar.Registrar;
import com.google.domain.registry.model.registrar.RegistrarContact;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Response;
import com.google.domain.registry.util.Concurrent;
import com.google.domain.registry.util.FormattingLogger;
import com.googlecode.objectify.VoidWork;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import javax.annotation.Nullable;
import javax.inject.Inject;
/**
* Action that syncs changes to {@link RegistrarContact} entities with Google Groups.
*
* <p>This uses the <a href="https://developers.google.com/admin-sdk/directory/">Directory API</a>.
*/
@Action(path = "/_dr/task/syncGroupMembers", method = POST)
public final class SyncGroupMembersAction implements Runnable {
/**
* The number of threads to run simultaneously (one per registrar) while processing group syncs.
* This number is purposefully low because App Engine will complain about a large number of
* requests per second, so it's better to spread the work out (as we are only running this servlet
* once per hour anyway).
*/
private static final int NUM_WORK_THREADS = 2;
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
private enum Result {
OK(SC_OK, "Group memberships successfully updated."),
    NOT_MODIFIED(SC_OK, "No registrar contacts have been updated since the last time the servlet ran."),
FAILED(SC_INTERNAL_SERVER_ERROR, "Error occurred while updating registrar contacts.") {
@Override
protected void log(Throwable cause) {
logger.severefmt(cause, "%s", message);
}};
final int statusCode;
final String message;
private Result(int statusCode, String message) {
this.statusCode = statusCode;
this.message = message;
}
/** Log an error message. Results that use log levels other than info should override this. */
void log(@Nullable Throwable cause) {
logger.infofmt(cause, "%s", message);
}
}
@Inject GroupsConnection groupsConnection;
@Inject Response response;
@Inject @Config("publicDomainName") String publicDomainName;
@Inject SyncGroupMembersAction() {}
private void sendResponse(Result result, @Nullable List<Throwable> causes) {
for (Throwable cause : nullToEmpty(causes)) {
result.log(cause);
}
response.setStatus(result.statusCode);
response.setPayload(String.format("%s %s\n", result.name(), result.message));
}
/**
   * Returns the Google Groups email address for the given registrar clientId and
   * {@link RegistrarContact.Type}.
*/
public static String getGroupEmailAddressForContactType(
String clientId,
RegistrarContact.Type type,
String publicDomainName) {
// Take the registrar's clientId, make it lowercase, and remove all characters that aren't
// alphanumeric, hyphens, or underscores.
return String.format(
"%s-%s-contacts@%s", normalizeClientId(clientId), type.getDisplayName(), publicDomainName);
}
/**
* Loads all Registrars, and for each one that is marked dirty, grabs the existing group
* memberships and updates them to reflect the current state of the RegistrarContacts.
*/
@Override
public void run() {
List<Registrar> dirtyRegistrars = Registrar
.loadAllActive()
.filter(new Predicate<Registrar>() {
@Override
public boolean apply(Registrar registrar) {
// Only grab registrars that require syncing and are of the correct type.
return registrar.getContactsRequireSyncing()
&& registrar.getType() == Registrar.Type.REAL;
}})
.toList();
if (dirtyRegistrars.isEmpty()) {
sendResponse(Result.NOT_MODIFIED, null);
return;
}
// Run multiple threads to communicate with Google Groups simultaneously.
ImmutableList<Optional<Throwable>> results = Concurrent.transform(
dirtyRegistrars,
NUM_WORK_THREADS,
new Function<Registrar, Optional<Throwable>>() {
@Override
public Optional<Throwable> apply(final Registrar registrar) {
try {
syncRegistrarContacts(registrar);
return Optional.<Throwable> absent();
} catch (Throwable e) {
logger.severe(e, e.getMessage());
return Optional.of(e);
}
}});
List<Throwable> errors = getErrorsAndUpdateFlagsForSuccesses(dirtyRegistrars, results);
// If there were no errors, return success; otherwise return a failed status and log the errors.
if (errors.isEmpty()) {
sendResponse(Result.OK, null);
} else {
sendResponse(Result.FAILED, errors);
}
}
/**
* Parses the results from Google Groups for each registrar, setting the dirty flag to false in
* Datastore for the calls that succeeded and accumulating the errors for the calls that failed.
*/
private List<Throwable> getErrorsAndUpdateFlagsForSuccesses(
List<Registrar> registrars,
List<Optional<Throwable>> results) {
final ImmutableList.Builder<Registrar> registrarsToSave = new ImmutableList.Builder<>();
List<Throwable> errors = new ArrayList<>();
for (int i = 0; i < results.size(); i++) {
Optional<Throwable> opt = results.get(i);
if (opt.isPresent()) {
errors.add(opt.get());
} else {
registrarsToSave.add(
registrars.get(i).asBuilder().setContactsRequireSyncing(false).build());
}
}
ofy().transactNew(new VoidWork() {
@Override
public void vrun() {
ofy().save().entities(registrarsToSave.build());
}});
return errors;
}
/** Syncs the contacts for an individual registrar to Google Groups. */
private void syncRegistrarContacts(Registrar registrar) {
String groupKey = "";
try {
Set<RegistrarContact> registrarContacts = registrar.getContacts();
long totalAdded = 0;
long totalRemoved = 0;
for (final RegistrarContact.Type type : RegistrarContact.Type.values()) {
groupKey = getGroupEmailAddressForContactType(
registrar.getClientIdentifier(), type, publicDomainName);
Set<String> currentMembers = groupsConnection.getMembersOfGroup(groupKey);
Set<String> desiredMembers = FluentIterable.from(registrarContacts)
.filter(new Predicate<RegistrarContact>() {
@Override
public boolean apply(RegistrarContact contact) {
return contact.getTypes().contains(type);
}})
.transform(new Function<RegistrarContact, String>() {
@Override
public String apply(RegistrarContact contact) {
return contact.getEmailAddress();
}})
.toSet();
for (String email : Sets.difference(desiredMembers, currentMembers)) {
groupsConnection.addMemberToGroup(groupKey, email, Role.MEMBER);
totalAdded++;
}
for (String email : Sets.difference(currentMembers, desiredMembers)) {
groupsConnection.removeMemberFromGroup(groupKey, email);
totalRemoved++;
}
}
logger.infofmt("Successfully synced contacts for registrar %s: added %d and removed %d",
registrar.getClientIdentifier(),
totalAdded,
totalRemoved);
} catch (IOException e) {
// Bail out of the current sync job if an error occurs. This is OK because (a) errors usually
// indicate that retrying won't succeed at all, or at least not immediately, and (b) the sync
// job will run within an hour anyway and effectively resume where it left off if this was a
// transient error.
String msg = String.format("Couldn't sync contacts for registrar %s to group %s",
registrar.getClientIdentifier(), groupKey);
throw new RuntimeException(msg, e);
}
}
}
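A self-contained sketch of the membership diff at the heart of syncRegistrarContacts(): add what is desired but missing, remove what is present but no longer desired (email addresses are hypothetical):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class GroupDiffExample {
  public static void main(String[] args) {
    Set<String> currentMembers = ImmutableSet.of("old@example.com", "keep@example.com");
    Set<String> desiredMembers = ImmutableSet.of("keep@example.com", "new@example.com");
    for (String email : Sets.difference(desiredMembers, currentMembers)) {
      System.out.println("add " + email);     // add new@example.com
    }
    for (String email : Sets.difference(currentMembers, desiredMembers)) {
      System.out.println("remove " + email);  // remove old@example.com
    }
  }
}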

View file

@ -1,116 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export;
import static com.google.domain.registry.request.Action.Method.POST;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.ViewDefinition;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.domain.registry.bigquery.BigqueryFactory;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.HttpException.InternalServerErrorException;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.SqlTemplate;
import java.io.IOException;
import javax.inject.Inject;
/** Update a well-known view to point at a certain datastore snapshot table in BigQuery. */
@Action(path = UpdateSnapshotViewAction.PATH, method = POST)
public class UpdateSnapshotViewAction implements Runnable {
/** Headers for passing parameters into the servlet. */
static final String UPDATE_SNAPSHOT_DATASET_ID_PARAM = "dataset";
static final String UPDATE_SNAPSHOT_TABLE_ID_PARAM = "table";
static final String UPDATE_SNAPSHOT_KIND_PARAM = "kind";
static final String LATEST_SNAPSHOT_DATASET = "latest_snapshot";
/** Servlet-specific details needed for enqueuing tasks against itself. */
static final String QUEUE = "export-snapshot-update-view"; // See queue.xml.
static final String PATH = "/_dr/task/updateSnapshotView"; // See web.xml.
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@Inject @Parameter(UPDATE_SNAPSHOT_DATASET_ID_PARAM) String datasetId;
@Inject @Parameter(UPDATE_SNAPSHOT_TABLE_ID_PARAM) String tableId;
@Inject @Parameter(UPDATE_SNAPSHOT_KIND_PARAM) String kindName;
@Inject @Config("projectId") String projectId;
@Inject BigqueryFactory bigqueryFactory;
@Inject UpdateSnapshotViewAction() {}
/** Create a task for updating a snapshot view. */
public static TaskOptions createViewUpdateTask(
String datasetId, String tableId, String kindName) {
return TaskOptions.Builder.withUrl(PATH)
.method(Method.POST)
.param(UPDATE_SNAPSHOT_DATASET_ID_PARAM, datasetId)
.param(UPDATE_SNAPSHOT_TABLE_ID_PARAM, tableId)
.param(UPDATE_SNAPSHOT_KIND_PARAM, kindName);
}
@Override
public void run() {
try {
updateSnapshotView(datasetId, tableId, kindName);
} catch (Throwable e) {
logger.severefmt(e, "Could not update snapshot view for table %s", tableId);
throw new InternalServerErrorException("Error in update snapshot view action");
}
}
private void updateSnapshotView(String datasetId, String tableId, String kindName)
throws IOException {
Bigquery bigquery = bigqueryFactory.create(projectId, LATEST_SNAPSHOT_DATASET);
updateTable(bigquery, new Table()
.setTableReference(new TableReference()
.setProjectId(projectId)
.setDatasetId(LATEST_SNAPSHOT_DATASET)
.setTableId(kindName))
.setView(new ViewDefinition().setQuery(
SqlTemplate.create("SELECT * FROM [%DATASET%.%TABLE%]")
.put("DATASET", datasetId)
.put("TABLE", tableId)
.build())));
logger.infofmt(
"Updated view %s:%s to point at snapshot table %s:%s.",
LATEST_SNAPSHOT_DATASET,
kindName,
datasetId,
tableId);
}
private static void updateTable(Bigquery bigquery, Table table) throws IOException {
TableReference ref = table.getTableReference();
try {
bigquery.tables()
.update(ref.getProjectId(), ref.getDatasetId(), ref.getTableId(), table)
.execute();
    } catch (GoogleJsonResponseException e) {
      if (e.getDetails() != null && e.getDetails().getCode() == 404) {
        // The view doesn't exist yet, so create it instead of updating it.
        bigquery.tables().insert(ref.getProjectId(), ref.getDatasetId(), table).execute();
      } else {
        // Surface unexpected errors instead of silently swallowing them.
        throw e;
      }
    }
  }
}
}
}
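A worked example of the view query produced above, with hypothetical dataset and table ids; plain String.format stands in for the internal SqlTemplate helper:

public class SnapshotViewQueryExample {
  public static void main(String[] args) {
    String datasetId = "snapshots";
    String tableId = "auto_snapshot_20160513_Registrar";
    // The view in latest_snapshot simply selects everything from the newest table.
    System.out.println(String.format("SELECT * FROM [%s.%s]", datasetId, tableId));
    // SELECT * FROM [snapshots.auto_snapshot_20160513_Registrar]
  }
}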

View file

@ -1,16 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package com.google.domain.registry.export;

View file

@ -1,26 +0,0 @@
package(default_visibility = ["//java/com/google/domain/registry:registry_project"])
java_library(
name = "sheet",
srcs = glob(["*.java"]),
deps = [
"//java/com/google/api/client/googleapis/auth/oauth2",
"//java/com/google/common/base",
"//java/com/google/common/collect",
"//java/com/google/common/io",
"//java/com/google/common/net",
"//java/com/google/domain/registry/config",
"//java/com/google/domain/registry/model",
"//java/com/google/domain/registry/request",
"//java/com/google/domain/registry/util",
"//java/com/google/gdata",
"//java/com/google/gdata:spreadsheet",
"//third_party/java/appengine:appengine-api",
"//third_party/java/dagger",
"//third_party/java/joda_time",
"//third_party/java/jsr305_annotations",
"//third_party/java/jsr330_inject",
"//third_party/java/servlet/servlet_api",
],
)

View file

@ -1,36 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export.sheet;
import static com.google.common.base.Strings.emptyToNull;
import com.google.common.base.Optional;
import com.google.domain.registry.request.Parameter;
import dagger.Module;
import dagger.Provides;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the sheet package. */
@Module
public final class SheetModule {
@Provides
@Parameter("id")
static Optional<String> provideId(HttpServletRequest req) {
return Optional.fromNullable(emptyToNull(req.getParameter("id")));
}
}

View file

@ -1,106 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export.sheet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.gdata.client.spreadsheet.SpreadsheetService;
import com.google.gdata.data.spreadsheet.CustomElementCollection;
import com.google.gdata.data.spreadsheet.ListEntry;
import com.google.gdata.data.spreadsheet.ListFeed;
import com.google.gdata.data.spreadsheet.SpreadsheetEntry;
import com.google.gdata.data.spreadsheet.WorksheetEntry;
import com.google.gdata.util.ServiceException;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import javax.inject.Inject;
/** Generic data synchronization utility for Google Spreadsheets. */
class SheetSynchronizer {
private static final String SPREADSHEET_URL_PREFIX =
"https://spreadsheets.google.com/feeds/spreadsheets/";
@Inject SpreadsheetService spreadsheetService;
@Inject SheetSynchronizer() {}
/**
* Replace the contents of a Google Spreadsheet with {@code data}.
*
* <p>In order for this to work, you must create a spreadsheet with a header row, each containing
* the column name, without any spaces. All subsequent rows are considered data, so long as
* they're not blank. If you have a blank row in the middle of your data, you're going to have
* problems. You must also make sure that the spreadsheet has been shared with the API client
* credential email address.
*
* <p>The algorithm works by first assuming that the spreadsheet is sorted in the same way that
   * {@code data} is sorted. It then iterates through the existing rows, comparing them to the
   * items in {@code data}. Iteration continues until we run out of either rows or items in
   * {@code data}. If any rows remain, they'll be deleted. If instead items remain in
   * {@code data}, they'll be inserted.
*
* @param spreadsheetId The ID of your spreadsheet. This can be obtained by opening the Google
* spreadsheet in your browser and copying the ID from the URL.
   * @param data This should be a <i>sorted</i> list of rows containing the entirety of the
   * spreadsheet. Each row is a map, where the key must be exactly the same as the column header
   * cell in the spreadsheet, and the value is the string to store in that cell.
* @throws IOException error communicating with the GData service.
* @throws ServiceException if a system error occurred when retrieving the entry.
* @throws com.google.gdata.util.ParseException error parsing the returned entry.
* @throws com.google.gdata.util.ResourceNotFoundException if an entry URL is not valid.
* @throws com.google.gdata.util.ServiceForbiddenException if the GData service cannot get the
* entry resource due to access constraints.
* @see "https://developers.google.com/google-apps/spreadsheets/"
*/
void synchronize(String spreadsheetId, ImmutableList<ImmutableMap<String, String>> data)
throws IOException, ServiceException {
URL url = new URL(SPREADSHEET_URL_PREFIX + spreadsheetId);
SpreadsheetEntry spreadsheet = spreadsheetService.getEntry(url, SpreadsheetEntry.class);
WorksheetEntry worksheet = spreadsheet.getWorksheets().get(0);
worksheet.setRowCount(data.size());
worksheet = worksheet.update();
ListFeed listFeed = spreadsheetService.getFeed(worksheet.getListFeedUrl(), ListFeed.class);
List<ListEntry> entries = listFeed.getEntries();
int commonSize = Math.min(entries.size(), data.size());
for (int i = 0; i < commonSize; i++) {
ListEntry entry = entries.get(i);
CustomElementCollection elements = entry.getCustomElements();
boolean mutated = false;
for (ImmutableMap.Entry<String, String> cell : data.get(i).entrySet()) {
if (!cell.getValue().equals(elements.getValue(cell.getKey()))) {
mutated = true;
elements.setValueLocal(cell.getKey(), cell.getValue());
}
}
if (mutated) {
entry.update();
}
}
if (data.size() > entries.size()) {
for (int i = entries.size(); i < data.size(); i++) {
ListEntry entry = listFeed.createEntry();
CustomElementCollection elements = entry.getCustomElements();
for (ImmutableMap.Entry<String, String> cell : data.get(i).entrySet()) {
elements.setValueLocal(cell.getKey(), cell.getValue());
}
listFeed.insert(entry);
}
}
}
}
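The synchronize() algorithm in miniature, over plain string lists instead of GData rows: walk the common prefix and patch differences, then append whatever items of data remain (shrinking the sheet when data is shorter is handled above by setRowCount):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RowSyncExample {
  public static void main(String[] args) {
    List<String> rows = new ArrayList<>(Arrays.asList("a", "X", "c"));
    List<String> data = Arrays.asList("a", "b", "c", "d");
    int commonSize = Math.min(rows.size(), data.size());
    for (int i = 0; i < commonSize; i++) {
      if (!data.get(i).equals(rows.get(i))) {
        rows.set(i, data.get(i));  // mutated row -> entry.update()
      }
    }
    for (int i = rows.size(); i < data.size(); i++) {
      rows.add(data.get(i));       // remaining data -> listFeed.insert(entry)
    }
    System.out.println(rows);  // [a, b, c, d]
  }
}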

View file

@ -1,39 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export.sheet;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.common.collect.ImmutableList;
import com.google.gdata.client.spreadsheet.SpreadsheetService;
import dagger.Module;
import dagger.Provides;
/** Dagger module for {@link SpreadsheetService}. */
@Module
public final class SpreadsheetServiceModule {
private static final String APPLICATION_NAME = "google-registry-v1";
private static final ImmutableList<String> SCOPES = ImmutableList.of(
"https://spreadsheets.google.com/feeds",
"https://docs.google.com/feeds");
@Provides
static SpreadsheetService provideSpreadsheetService(GoogleCredential credential) {
SpreadsheetService service = new SpreadsheetService(APPLICATION_NAME);
service.setOAuth2Credentials(credential.createScoped(SCOPES));
return service;
}
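// A hypothetical wiring sketch (the component and credential module names here are illustrative,
// not part of this codebase):
//
//   @Component(modules = {SpreadsheetServiceModule.class, CredentialModule.class})
//   interface SheetComponent {
//     SpreadsheetService spreadsheetService();
//   }
//
//   SpreadsheetService service = DaggerSheetComponent.create().spreadsheetService();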
}

View file

@ -1,211 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export.sheet;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.ABUSE;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.ADMIN;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.BILLING;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.LEGAL;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.MARKETING;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.TECH;
import static com.google.domain.registry.model.registrar.RegistrarContact.Type.WHOIS;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Ordering;
import com.google.domain.registry.model.registrar.Registrar;
import com.google.domain.registry.model.registrar.RegistrarAddress;
import com.google.domain.registry.model.registrar.RegistrarContact;
import com.google.domain.registry.util.Clock;
import com.google.domain.registry.util.DateTimeUtils;
import com.google.gdata.util.ServiceException;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import java.io.IOException;
import javax.annotation.Nullable;
import javax.inject.Inject;
/**
* Class for synchronizing all {@link Registrar} datastore objects to a Google Spreadsheet.
*
* @see SyncRegistrarsSheetAction
*/
class SyncRegistrarsSheet {
@Inject Clock clock;
@Inject SheetSynchronizer sheetSynchronizer;
@Inject SyncRegistrarsSheet() {}
/** Returns true if any {@link Registrar} entity was modified in the past {@code duration}. */
boolean wasRegistrarsModifiedInLast(Duration duration) {
DateTime watermark = clock.nowUtc().minus(duration);
for (Registrar registrar : Registrar.loadAll()) {
if (DateTimeUtils.isAtOrAfter(registrar.getLastUpdateTime(), watermark)) {
return true;
}
}
return false;
}
/** Performs the synchronization operation. */
void run(String spreadsheetId) throws IOException, ServiceException {
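// Build the sheet rows: sort registrars by client identifier for a stable row order, keep only
// REAL and OTE registrars, and flatten each one into a map keyed by spreadsheet column name.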
sheetSynchronizer.synchronize(
spreadsheetId,
FluentIterable
.from(
new Ordering<Registrar>() {
@Override
public int compare(Registrar left, Registrar right) {
return left.getClientIdentifier().compareTo(right.getClientIdentifier());
}
}.immutableSortedCopy(Registrar.loadAll()))
.filter(
new Predicate<Registrar>() {
@Override
public boolean apply(Registrar registrar) {
return registrar.getType() == Registrar.Type.REAL
|| registrar.getType() == Registrar.Type.OTE;
}
})
.transform(
new Function<Registrar, ImmutableMap<String, String>>() {
@Override
public ImmutableMap<String, String> apply(Registrar registrar) {
ImmutableMap.Builder<String, String> builder = new ImmutableMap.Builder<>();
ImmutableSortedSet<RegistrarContact> contacts = registrar.getContacts();
RegistrarAddress address =
firstNonNull(
registrar.getLocalizedAddress(),
firstNonNull(
registrar.getInternationalizedAddress(),
new RegistrarAddress.Builder()
.setStreet(ImmutableList.of("UNKNOWN"))
.setCity("UNKNOWN")
.setCountryCode("US")
.build()));
//
// WARNING WARNING WARNING
//
// Do not change these mappings simply because the Registrar model changed. Only
// change these mappings if the people who use the spreadsheet requested it be
// changed.
//
// These values are hard-coded because they correspond to actual spreadsheet
// columns. If you change this dictionary, then you'll need to manually add new
// columns to the registrar spreadsheets for all environments before deployment,
// and you'll need to remove deleted columns probably like a week after
// deployment.
//
builder.put("clientIdentifier", convert(registrar.getClientIdentifier()));
builder.put("registrarName", convert(registrar.getRegistrarName()));
builder.put("state", convert(registrar.getState()));
builder.put("ianaIdentifier", convert(registrar.getIanaIdentifier()));
builder.put("billingIdentifier", convert(registrar.getBillingIdentifier()));
builder.put("primaryContacts", convertContacts(contacts, byType(ADMIN)));
builder.put("techContacts", convertContacts(contacts, byType(TECH)));
builder.put("marketingContacts", convertContacts(contacts, byType(MARKETING)));
builder.put("abuseContacts", convertContacts(contacts, byType(ABUSE)));
builder.put("whoisInquiryContacts", convertContacts(contacts, byType(WHOIS)));
builder.put("legalContacts", convertContacts(contacts, byType(LEGAL)));
builder.put("billingContacts", convertContacts(contacts, byType(BILLING)));
builder.put(
"contactsMarkedAsWhoisAdmin",
convertContacts(
contacts,
new Predicate<RegistrarContact>() {
@Override
public boolean apply(RegistrarContact contact) {
return contact.getVisibleInWhoisAsAdmin();
}
}));
builder.put(
"contactsMarkedAsWhoisTech",
convertContacts(
contacts,
new Predicate<RegistrarContact>() {
@Override
public boolean apply(RegistrarContact contact) {
return contact.getVisibleInWhoisAsTech();
}
}));
builder.put("emailAddress", convert(registrar.getEmailAddress()));
builder.put("address.street", convert(address.getStreet()));
builder.put("address.city", convert(address.getCity()));
builder.put("address.state", convert(address.getState()));
builder.put("address.zip", convert(address.getZip()));
builder.put("address.countryCode", convert(address.getCountryCode()));
builder.put("phoneNumber", convert(registrar.getPhoneNumber()));
builder.put("faxNumber", convert(registrar.getFaxNumber()));
builder.put("creationTime", convert(registrar.getCreationTime()));
builder.put("lastUpdateTime", convert(registrar.getLastUpdateTime()));
builder.put("allowedTlds", convert(registrar.getAllowedTlds()));
builder.put("whoisServer", convert(registrar.getWhoisServer()));
builder.put("blockPremiumNames", convert(registrar.getBlockPremiumNames()));
builder.put("ipAddressWhitelist", convert(registrar.getIpAddressWhitelist()));
builder.put("url", convert(registrar.getUrl()));
builder.put("referralUrl", convert(registrar.getReferralUrl()));
builder.put("icannReferralEmail", convert(registrar.getIcannReferralEmail()));
return builder.build();
}
})
.toList());
}
private static String convertContacts(
Iterable<RegistrarContact> contacts, Predicate<RegistrarContact> filter) {
StringBuilder result = new StringBuilder();
boolean first = true;
for (RegistrarContact contact : contacts) {
if (!filter.apply(contact)) {
continue;
}
if (first) {
first = false;
} else {
result.append("\n");
}
result.append(contact.toStringMultilinePlainText());
}
return result.toString();
}
private static Predicate<RegistrarContact> byType(final RegistrarContact.Type type) {
return new Predicate<RegistrarContact>() {
@Override
public boolean apply(RegistrarContact contact) {
return contact.getTypes().contains(type);
}};
}
/** Converts a value to a string representation that can be stored in a spreadsheet cell. */
private static String convert(@Nullable Object value) {
if (value == null) {
return "";
} else if (value instanceof Iterable) {
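// e.g. ImmutableList.of("a", "b") becomes the two-line cell value "a\nb".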
return Joiner.on('\n').join((Iterable<?>) value);
} else {
return value.toString();
}
}
}

View file

@ -1,162 +0,0 @@
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.domain.registry.export.sheet;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.net.MediaType.PLAIN_TEXT_UTF_8;
import static com.google.domain.registry.request.Action.Method.POST;
import static javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_NO_CONTENT;
import static javax.servlet.http.HttpServletResponse.SC_OK;
import com.google.appengine.api.modules.ModulesService;
import com.google.appengine.api.modules.ModulesServiceFactory;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.common.base.Optional;
import com.google.domain.registry.config.ConfigModule.Config;
import com.google.domain.registry.model.server.Lock;
import com.google.domain.registry.request.Action;
import com.google.domain.registry.request.Parameter;
import com.google.domain.registry.request.Response;
import com.google.domain.registry.util.FormattingLogger;
import com.google.domain.registry.util.NonFinalForTesting;
import com.google.gdata.util.ServiceException;
import org.joda.time.Duration;
import java.io.IOException;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
import javax.inject.Inject;
/**
* Action for synchronizing the registrars spreadsheet.
*
* <p>You can specify the spreadsheet ID by passing the "id" parameter. If this parameter is not
* specified, then the spreadsheet ID will be obtained from the registry configuration.
*
* <p>Cron runs this action hourly, so in order to minimize Google Spreadsheets I/O, this action
* first iterates through all registrars and checks whether any were modified in the past hour. If
* no modifications were made, the action exits without performing any syncing.
*
* <p><b>Note:</b> Setting the "id" parameter will disable the registrar update check.
*
* <p>Before using this service, you should make sure all the column headers listed in this source
* file are present. You also need to share the spreadsheet with the email address from the JSON
* credential file and give it edit permission.
*
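* <p>A manual run can be triggered with a POST request, as in this sketch (the hostname is
* hypothetical; substitute the hostname of your backend module):
*
* <pre>   {@code
* curl -X POST "https://backend.<project>.appspot.com/_dr/task/syncRegistrarsSheet?id=SHEET_ID"
* }</pre>
*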
* @see SyncRegistrarsSheet
*/
@Action(path = SyncRegistrarsSheetAction.PATH, method = POST)
public class SyncRegistrarsSheetAction implements Runnable {
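/** Outcome of a sync attempt, mapping each case to the HTTP status code and message returned. */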
private enum Result {
OK(SC_OK, "Sheet successfully updated."),
NOTMODIFIED(SC_OK, "Registrars table hasn't been modified in past hour."),
LOCKED(SC_NO_CONTENT, "Another task is currently writing to this sheet; dropping task."),
MISSINGNO(SC_BAD_REQUEST, "No sheet ID specified or configured; dropping task.") {
@Override
protected void log(Exception cause) {
logger.warningfmt(cause, "%s", message);
}},
FAILED(SC_INTERNAL_SERVER_ERROR, "Spreadsheet synchronization failed") {
@Override
protected void log(Exception cause) {
logger.severefmt(cause, "%s", message);
}};
private final int statusCode;
protected final String message;
private Result(int statusCode, String message) {
this.statusCode = statusCode;
this.message = message;
}
/** Log an error message. Results that use log levels other than info should override this. */
protected void log(@Nullable Exception cause) {
logger.infofmt(cause, "%s", message);
}
private void send(Response response, @Nullable Exception cause) {
log(cause);
response.setStatus(statusCode);
response.setContentType(PLAIN_TEXT_UTF_8);
response.setPayload(String.format("%s %s\n", name(), message));
}
}
public static final String PATH = "/_dr/task/syncRegistrarsSheet";
private static final String QUEUE = "sheet";
private static final String LOCK_NAME = "Synchronize registrars sheet";
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
@NonFinalForTesting
private static ModulesService modulesService = ModulesServiceFactory.getModulesService();
@Inject Response response;
@Inject SyncRegistrarsSheet syncRegistrarsSheet;
@Inject @Config("sheetLockTimeout") Duration timeout;
@Inject @Config("sheetRegistrarId") Optional<String> idConfig;
@Inject @Config("sheetRegistrarInterval") Duration interval;
@Inject @Parameter("id") Optional<String> idParam;
@Inject SyncRegistrarsSheetAction() {}
@Override
public void run() {
final Optional<String> sheetId = idParam.or(idConfig);
if (!sheetId.isPresent()) {
Result.MISSINGNO.send(response, null);
return;
}
if (!idParam.isPresent()) {
// TODO(b/19082368): Use a cursor.
if (!syncRegistrarsSheet.wasRegistrarsModifiedInLast(interval)) {
Result.NOTMODIFIED.send(response, null);
return;
}
}
String sheetLockName = String.format("%s: %s", LOCK_NAME, sheetId.get());
Callable<Void> runner = new Callable<Void>() {
@Nullable
@Override
public Void call() throws IOException {
try {
syncRegistrarsSheet.run(sheetId.get());
Result.OK.send(response, null);
} catch (IOException | ServiceException e) {
Result.FAILED.send(response, e);
}
return null;
}
};
if (!Lock.executeWithLocks(runner, getClass(), "", timeout, sheetLockName)) {
// If we fail to acquire the lock, it probably means lots of updates are happening at once, in
// which case it should be safe to not bother. The task queue definition should *not* specify
// max-concurrent-requests for this very reason.
Result.LOCKED.send(response, null);
}
}
/** Creates, enqueues, and returns a new backend task to sync registrar spreadsheets. */
public static TaskHandle enqueueBackendTask() {
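// Resolve the backend module's hostname and pin the task to it with a Host header, so the
// request is handled by the backend service.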
String hostname = modulesService.getVersionHostname("backend", null);
return getQueue(QUEUE).add(withUrl(PATH).method(Method.GET).header("Host", hostname));
}
}

Some files were not shown because too many files have changed in this diff