Refactor to be more in line with a standard Gradle project structure

This commit is contained in:
Gus Brodman 2019-05-21 14:12:47 -04:00
parent 8fa45e8c76
commit a7a983bfed
3141 changed files with 99 additions and 100 deletions


@@ -0,0 +1 @@
package(default_visibility = ["//visibility:public"])


@@ -0,0 +1,106 @@
# Copyright 2017 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate java test rules from given test_files.
Instead of having to create one test rule per test in the BUILD file, this rule
provides a handy way to create a bunch of test rules for the specified test
files.
"""
def GenTestRules(
        name,
        test_files,
        deps,
        exclude_tests = [],
        default_test_size = "small",
        small_tests = [],
        medium_tests = [],
        large_tests = [],
        enormous_tests = [],
        resources = [],
        flaky_tests = [],
        tags = [],
        prefix = "",
        jvm_flags = [],
        args = [],
        visibility = None,
        shard_count = 1):
    for test in _get_test_names(test_files):
        if test in exclude_tests:
            continue
        test_size = default_test_size
        if test in small_tests:
            test_size = "small"
        if test in medium_tests:
            test_size = "medium"
        if test in large_tests:
            test_size = "large"
        if test in enormous_tests:
            test_size = "enormous"
        flaky = 0
        if (test in flaky_tests) or ("flaky" in tags):
            flaky = 1
        java_class = _package_from_path(
            native.package_name() + "/" + _strip_right(test, ".java"),
        )
        package = java_class[:java_class.rfind(".")]
        native.java_test(
            name = prefix + test,
            runtime_deps = deps,
            resources = resources,
            size = test_size,
            jvm_flags = jvm_flags,
            args = args,
            flaky = flaky,
            tags = tags,
            test_class = java_class,
            visibility = visibility,
            shard_count = shard_count,
        )

def _get_test_names(test_files):
    test_names = []
    for test_file in test_files:
        if not test_file.endswith("Test.java"):
            continue
        test_names += [test_file[:-5]]
    return test_names

def _package_from_path(package_path, src_impls = None):
    src_impls = src_impls or ["javatests/", "java/"]
    for src_impl in src_impls:
        if not src_impl.endswith("/"):
            src_impl += "/"
        index = _index_of_end(package_path, src_impl)
        if index >= 0:
            package_path = package_path[index:]
            break
    return package_path.replace("/", ".")

def _strip_right(str, suffix):
    """Returns str without the suffix if it ends with suffix."""
    if str.endswith(suffix):
        return str[0:len(str) - len(suffix)]
    else:
        return str

def _index_of_end(str, part):
    """If part is in str, return the index of the first character after part.

    Return -1 if part is not in str.
    """
    index = str.find(part)
    if index >= 0:
        return index + len(part)
    return -1


@@ -0,0 +1,559 @@
package(
default_visibility = ["//java/google/registry:registry_project"],
)
licenses(["notice"]) # Apache 2.0
load("//java/google/registry/builddefs:zip_file.bzl", "zip_file")
load("//java/google/registry/builddefs:registry_ear_file.bzl", "registry_ear_file")
package_group(
name = "registry_project",
packages = [
"//java/google/registry/...",
"//javatests/google/registry/...",
"//python/...",
],
)
zip_file(
name = "common_war",
srcs = [
"@com_google_appengine_api_1_0_sdk",
],
out = "mandatory_stuff.war",
mappings = {
"com_google_appengine_api_1_0_sdk": "WEB-INF/lib",
},
visibility = ["//visibility:private"],
)
################################################################################
# DOMAIN REGISTRY :: PRODUCTION ENVIRONMENT
registry_ear_file(
name = "registry_ear",
out = "registry.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default.war": "default",
"registry_pubapi.war": "pubapi",
"registry_backend.war": "backend",
"registry_tools.war": "tools",
},
)
# We use the production "nocron" earfile only in the event of a datastore
# restore.
registry_ear_file(
name = "registry_nocron_ear",
out = "registry_nocron.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_nocron.war": "default",
"registry_pubapi.war": "pubapi",
"registry_backend.war": "backend",
"registry_tools.war": "tools",
},
)
zip_file(
name = "registry_default_war",
srcs = [
"env/common/default/WEB-INF/datastore-indexes.xml",
"env/common/default/WEB-INF/dispatch.xml",
"env/common/default/WEB-INF/dos.xml",
"env/common/default/WEB-INF/logging.properties",
"env/common/default/WEB-INF/queue.xml",
"env/common/default/WEB-INF/web.xml",
"env/production/default/WEB-INF/appengine-web.xml",
"env/production/default/WEB-INF/cron.xml",
"//java/google/registry/module/frontend:frontend_jar_deploy.jar",
],
out = "registry_default.war",
mappings = {
"domain_registry/java/google/registry/env/common/default": "",
"domain_registry/java/google/registry/env/production/default": "",
"domain_registry/java/google/registry/module/frontend": "WEB-INF/lib",
},
deps = [
":common_war",
"//java/google/registry/ui:war_debug",
],
)
zip_file(
name = "registry_default_nocron_war",
out = "registry_default_nocron.war",
exclude = ["WEB-INF/cron.xml"],
deps = [":registry_default_war"],
)
zip_file(
name = "registry_pubapi_war",
srcs = [
"env/common/pubapi/WEB-INF/dos.xml",
"env/common/pubapi/WEB-INF/logging.properties",
"env/common/pubapi/WEB-INF/web.xml",
"env/production/pubapi/WEB-INF/appengine-web.xml",
"//java/google/registry/module/pubapi:pubapi_jar_deploy.jar",
],
out = "registry_pubapi.war",
mappings = {
"domain_registry/java/google/registry/env/common/pubapi": "",
"domain_registry/java/google/registry/env/production/pubapi": "",
"domain_registry/java/google/registry/module/pubapi": "WEB-INF/lib",
},
deps = [
":common_war",
"//java/google/registry/ui:war_debug",
],
)
zip_file(
name = "registry_backend_war",
srcs = [
"env/common/backend/WEB-INF/logging.properties",
"env/common/backend/WEB-INF/web.xml",
"env/production/backend/WEB-INF/appengine-web.xml",
"//java/google/registry/module/backend:backend_jar_deploy.jar",
],
out = "registry_backend.war",
mappings = {
"domain_registry/java/google/registry/env/common/backend": "",
"domain_registry/java/google/registry/env/production/backend": "",
"domain_registry/java/google/registry/module/backend": "WEB-INF/lib",
},
deps = [
":common_war",
"//java/google/registry/ui:minimal_war",
],
)
zip_file(
name = "registry_tools_war",
srcs = [
"env/common/tools/WEB-INF/logging.properties",
"env/common/tools/WEB-INF/web.xml",
"env/production/tools/WEB-INF/appengine-web.xml",
"//java/google/registry/module/tools:tools_jar_deploy.jar",
],
out = "registry_tools.war",
mappings = {
"domain_registry/java/google/registry/env/common/tools": "",
"domain_registry/java/google/registry/env/production/tools": "",
"domain_registry/java/google/registry/module/tools": "WEB-INF/lib",
},
deps = [
":common_war",
"//java/google/registry/ui:war",
],
)
################################################################################
# DOMAIN REGISTRY :: QUALITY ASSURANCE ENVIRONMENT
#
# QA is an environment for automated integration testing and manual acceptance testing.
registry_ear_file(
name = "registry_qa_ear",
out = "registry_qa.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_qa.war": "default",
"registry_pubapi_qa.war": "pubapi",
"registry_backend_qa.war": "backend",
"registry_tools_qa.war": "tools",
},
)
zip_file(
name = "registry_default_qa_war",
srcs = [
"env/qa/default/WEB-INF/appengine-web.xml",
"env/qa/default/WEB-INF/cron.xml",
],
out = "registry_default_qa.war",
mappings = {
"domain_registry/java/google/registry/env/qa/default": "",
},
deps = [":registry_default_war"],
)
zip_file(
name = "registry_pubapi_qa_war",
srcs = [
"env/qa/pubapi/WEB-INF/appengine-web.xml",
],
out = "registry_pubapi_qa.war",
mappings = {
"domain_registry/java/google/registry/env/qa/pubapi": "",
},
deps = [":registry_pubapi_war"],
)
zip_file(
name = "registry_backend_qa_war",
srcs = [
"env/qa/backend/WEB-INF/appengine-web.xml",
],
out = "registry_backend_qa.war",
mappings = {
"domain_registry/java/google/registry/env/qa/backend": "",
},
deps = [":registry_backend_war"],
)
zip_file(
name = "registry_tools_qa_war",
srcs = [
"env/qa/tools/WEB-INF/appengine-web.xml",
],
out = "registry_tools_qa.war",
mappings = {
"domain_registry/java/google/registry/env/qa/tools": "",
},
deps = [":registry_tools_war"],
)
################################################################################
# DOMAIN REGISTRY :: SANDBOX ENVIRONMENT
#
# Sandbox is a production environment that registrar customers use to conduct
# integration tests against the registry service.
registry_ear_file(
name = "registry_sandbox_ear",
out = "registry_sandbox.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_sandbox.war": "default",
"registry_pubapi_sandbox.war": "pubapi",
"registry_backend_sandbox.war": "backend",
"registry_tools_sandbox.war": "tools",
},
)
zip_file(
name = "registry_default_sandbox_war",
srcs = [
"env/sandbox/default/WEB-INF/appengine-web.xml",
"env/sandbox/default/WEB-INF/cron.xml",
],
out = "registry_default_sandbox.war",
mappings = {
"domain_registry/java/google/registry/env/sandbox/default": "",
},
deps = [":registry_default_war"],
)
zip_file(
name = "registry_pubapi_sandbox_war",
srcs = [
"env/sandbox/pubapi/WEB-INF/appengine-web.xml",
],
out = "registry_pubapi_sandbox.war",
mappings = {
"domain_registry/java/google/registry/env/sandbox/pubapi": "",
},
deps = [":registry_pubapi_war"],
)
zip_file(
name = "registry_backend_sandbox_war",
srcs = [
"env/sandbox/backend/WEB-INF/appengine-web.xml",
],
out = "registry_backend_sandbox.war",
mappings = {
"domain_registry/java/google/registry/env/sandbox/backend": "",
},
deps = [":registry_backend_war"],
)
zip_file(
name = "registry_tools_sandbox_war",
srcs = [
"env/sandbox/tools/WEB-INF/appengine-web.xml",
],
out = "registry_tools_sandbox.war",
mappings = {
"domain_registry/java/google/registry/env/sandbox/tools": "",
},
deps = [":registry_tools_war"],
)
################################################################################
# DOMAIN REGISTRY :: ALPHA ENVIRONMENT
#
# The alpha environment is used by developers to test new features.
registry_ear_file(
name = "registry_alpha_ear",
out = "registry_alpha.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_alpha.war": "default",
"registry_pubapi_alpha.war": "pubapi",
"registry_backend_alpha.war": "backend",
"registry_tools_alpha.war": "tools",
},
)
# The "nocron" files are, unsurprisingly, versions of the archives that have
# had cron.xml removed. We do this because it's necessary to deploy them in
# this way when restoring a backup.
#
# "nocron" archives are currently prepared for production, alpha and crash. To
# prepare them for another environment, just do something similar.
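#
# For instance, a hypothetical sandbox variant (not part of this commit) would
# mirror the alpha targets below:
#
#   zip_file(
#       name = "registry_default_sandbox_nocron_war",
#       out = "registry_default_sandbox_nocron.war",
#       exclude = ["WEB-INF/cron.xml"],
#       deps = [":registry_default_sandbox_war"],
#   )
#
# together with a "registry_sandbox_nocron_ear" that maps that war to "default".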
registry_ear_file(
name = "registry_alpha_nocron_ear",
out = "registry_alpha_nocron.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_alpha_nocron.war": "default",
"registry_pubapi_alpha.war": "pubapi",
"registry_backend_alpha.war": "backend",
"registry_tools_alpha.war": "tools",
},
)
zip_file(
name = "registry_default_alpha_war",
srcs = [
"env/alpha/default/WEB-INF/appengine-web.xml",
"env/alpha/default/WEB-INF/cron.xml",
],
out = "registry_default_alpha.war",
mappings = {
"domain_registry/java/google/registry/env/alpha/default": "",
},
deps = [":registry_default_war"],
)
zip_file(
name = "registry_default_alpha_nocron_war",
out = "registry_default_alpha_nocron.war",
exclude = ["WEB-INF/cron.xml"],
deps = [":registry_default_alpha_war"],
)
zip_file(
name = "registry_pubapi_alpha_war",
srcs = [
"env/alpha/pubapi/WEB-INF/appengine-web.xml",
],
out = "registry_pubapi_alpha.war",
mappings = {
"domain_registry/java/google/registry/env/alpha/pubapi": "",
},
deps = [":registry_pubapi_war"],
)
zip_file(
name = "registry_backend_alpha_war",
srcs = [
"env/alpha/backend/WEB-INF/appengine-web.xml",
],
out = "registry_backend_alpha.war",
mappings = {
"domain_registry/java/google/registry/env/alpha/backend": "",
},
deps = [":registry_backend_war"],
)
zip_file(
name = "registry_tools_alpha_war",
srcs = [
"env/alpha/tools/WEB-INF/appengine-web.xml",
],
out = "registry_tools_alpha.war",
mappings = {
"domain_registry/java/google/registry/env/alpha/tools": "",
},
deps = [":registry_tools_war"],
)
################################################################################
# DOMAIN REGISTRY :: CRASH ENVIRONMENT
#
# The crash environment is used for testing loads, backups, and restores.
registry_ear_file(
name = "registry_crash_ear",
out = "registry_crash.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_crash.war": "default",
"registry_pubapi_crash.war": "pubapi",
"registry_backend_crash.war": "backend",
"registry_tools_crash.war": "tools",
},
)
# The "nocron" files are, unsurprisingly, versions of the archives that have
# had cron.xml removed. We do this because it's necessary to deploy them in
# this way when restoring a backup.
#
# "nocron" archives are currently prepared for production, alpha and crash. To
# prepare them for another environment, just do something similar.
registry_ear_file(
name = "registry_crash_nocron_ear",
out = "registry_crash_nocron.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_crash_nocron.war": "default",
"registry_pubapi_crash.war": "pubapi",
"registry_backend_crash.war": "backend",
"registry_tools_crash.war": "tools",
},
)
zip_file(
name = "registry_default_crash_war",
srcs = [
"env/crash/default/WEB-INF/appengine-web.xml",
"env/crash/default/WEB-INF/cron.xml",
],
out = "registry_default_crash.war",
mappings = {
"domain_registry/java/google/registry/env/crash/default": "",
},
deps = [":registry_default_war"],
)
zip_file(
name = "registry_default_crash_nocron_war",
out = "registry_default_crash_nocron.war",
exclude = ["WEB-INF/cron.xml"],
deps = [":registry_default_crash_war"],
)
zip_file(
name = "registry_pubapi_crash_war",
srcs = [
"env/crash/pubapi/WEB-INF/appengine-web.xml",
],
out = "registry_pubapi_crash.war",
mappings = {
"domain_registry/java/google/registry/env/crash/pubapi": "",
},
deps = [":registry_pubapi_war"],
)
zip_file(
name = "registry_backend_crash_war",
srcs = [
"env/crash/backend/WEB-INF/appengine-web.xml",
],
out = "registry_backend_crash.war",
mappings = {
"domain_registry/java/google/registry/env/crash/backend": "",
},
deps = [":registry_backend_war"],
)
zip_file(
name = "registry_tools_crash_war",
srcs = [
"env/crash/tools/WEB-INF/appengine-web.xml",
],
out = "registry_tools_crash.war",
mappings = {
"domain_registry/java/google/registry/env/crash/tools": "",
},
deps = [":registry_tools_war"],
)
################################################################################
# DOMAIN REGISTRY :: LOCAL ENVIRONMENT
#
# The local environment only runs locally for testing and is never deployed.
registry_ear_file(
name = "registry_local_ear",
out = "registry_local.ear",
configs = {
"env/common/META-INF/appengine-application.xml": "META-INF/appengine-application.xml",
"env/common/META-INF/application.xml": "META-INF/application.xml",
},
wars = {
"registry_default_local.war": "default",
"registry_pubapi_local.war": "pubapi",
"registry_backend_local.war": "backend",
"registry_tools_local.war": "tools",
},
)
zip_file(
name = "registry_default_local_war",
srcs = [
"env/local/default/WEB-INF/appengine-web.xml",
],
out = "registry_default_local.war",
mappings = {
"domain_registry/java/google/registry/env/local/default": "",
},
deps = [":registry_default_war"],
)
zip_file(
name = "registry_pubapi_local_war",
srcs = [
"env/local/pubapi/WEB-INF/appengine-web.xml",
],
out = "registry_pubapi_local.war",
mappings = {
"domain_registry/java/google/registry/env/local/pubapi": "",
},
deps = [":registry_pubapi_war"],
)
zip_file(
name = "registry_backend_local_war",
srcs = [
"env/local/backend/WEB-INF/appengine-web.xml",
],
out = "registry_backend_local.war",
mappings = {
"domain_registry/java/google/registry/env/local/backend": "",
},
deps = [":registry_backend_war"],
)
zip_file(
name = "registry_tools_local_war",
srcs = [
"env/local/tools/WEB-INF/appengine-web.xml",
],
out = "registry_tools_local.war",
mappings = {
"domain_registry/java/google/registry/env/local/tools": "",
},
deps = [":registry_tools_war"],
)


@@ -0,0 +1,33 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "backup",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/config",
"//java/google/registry/cron",
"//java/google/registry/mapreduce",
"//java/google/registry/mapreduce/inputs",
"//java/google/registry/model",
"//java/google/registry/request",
"//java/google/registry/request/auth",
"//java/google/registry/util",
"//third_party/objectify:objectify-v4_1",
"@com_google_appengine_api_1_0_sdk",
"@com_google_appengine_tools_appengine_gcs_client",
"@com_google_appengine_tools_appengine_mapreduce",
"@com_google_auto_value",
"@com_google_code_findbugs_jsr305",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@javax_inject",
"@javax_servlet_api",
"@joda_time",
],
)


@@ -0,0 +1,95 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.appengine.api.ThreadManager.currentRequestThreadFactory;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
import static google.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.RestoreCommitLogsAction.FROM_TIME_PARAM;
import static google.registry.backup.RestoreCommitLogsAction.TO_TIME_PARAM;
import static google.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static google.registry.request.RequestParameters.extractRequiredParameter;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ListeningExecutorService;
import dagger.Module;
import dagger.Provides;
import google.registry.cron.CommitLogFanoutAction;
import google.registry.request.HttpException.BadRequestException;
import google.registry.request.Parameter;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
import javax.servlet.http.HttpServletRequest;
import org.joda.time.DateTime;
/**
* Dagger module for backup package.
*
* @see "google.registry.module.backend.BackendComponent"
*/
@Module
public final class BackupModule {
/** Dagger qualifier for backups. */
@Qualifier
@Documented
public @interface Backups {}
/** Number of threads in the threaded executor. */
private static final int NUM_THREADS = 10;
@Provides
@Parameter("bucket")
static int provideBucket(HttpServletRequest req) {
String param = extractRequiredParameter(req, CommitLogFanoutAction.BUCKET_PARAM);
Integer bucket = Ints.tryParse(param);
if (bucket == null) {
throw new BadRequestException("Bad bucket id");
}
return bucket;
}
@Provides
@Parameter(LOWER_CHECKPOINT_TIME_PARAM)
static DateTime provideLowerCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, LOWER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(UPPER_CHECKPOINT_TIME_PARAM)
static DateTime provideUpperCheckpointKey(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, UPPER_CHECKPOINT_TIME_PARAM);
}
@Provides
@Parameter(FROM_TIME_PARAM)
static DateTime provideFromTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, FROM_TIME_PARAM);
}
@Provides
@Parameter(TO_TIME_PARAM)
static DateTime provideToTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, TO_TIME_PARAM);
}
@Provides
@Backups
static ListeningExecutorService provideListeningExecutorService() {
return listeningDecorator(newFixedThreadPool(NUM_THREADS, currentRequestThreadFactory()));
}
}


@@ -0,0 +1,74 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static google.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import google.registry.model.ImmutableObject;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
/** Utilities for working with backups. */
public class BackupUtils {
/** Keys for user metadata fields on commit log files in GCS. */
public static final class GcsMetadataKeys {
private GcsMetadataKeys() {}
public static final String NUM_TRANSACTIONS = "num_transactions";
public static final String LOWER_BOUND_CHECKPOINT = "lower_bound_checkpoint";
public static final String UPPER_BOUND_CHECKPOINT = "upper_bound_checkpoint";
}
/**
* Converts the given {@link ImmutableObject} to a raw Datastore entity and writes it to an
* {@link OutputStream} in delimited protocol buffer format.
*/
static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
EntityTranslator.convertToPb(ofy().save().toEntity(entity)).writeDelimitedTo(stream);
}
/**
* Returns an iterator of {@link ImmutableObject} instances deserialized from the given stream.
*
* <p>This parses out delimited protocol buffers for raw Datastore entities and then Ofy-loads
* those as {@link ImmutableObject}.
*
* <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
*/
public static Iterator<ImmutableObject> createDeserializingIterator(final InputStream input) {
return new AbstractIterator<ImmutableObject>() {
@Override
protected ImmutableObject computeNext() {
EntityProto proto = new EntityProto();
if (proto.parseDelimitedFrom(input)) { // False means end of stream; other errors throw.
return ofy().load().fromEntity(EntityTranslator.createFromPb(proto));
}
return endOfData();
}};
}
public static ImmutableList<ImmutableObject> deserializeEntities(byte[] bytes) {
return ImmutableList.copyOf(createDeserializingIterator(new ByteArrayInputStream(bytes)));
}
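// Round-trip sketch (hypothetical; "entity" stands for any persisted ImmutableObject):
//
//   ByteArrayOutputStream out = new ByteArrayOutputStream();
//   serializeEntity(entity, out);
//   ImmutableList<ImmutableObject> roundTripped = deserializeEntities(out.toByteArray());
//
// serializeEntity writes one delimited protobuf per entity, so multiple entities can be
// appended to the same stream and read back in order by the deserializing iterator.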
}


@@ -0,0 +1,88 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static google.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import com.google.common.flogger.FluentLogger;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogCheckpointRoot;
import google.registry.request.Action;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import google.registry.util.TaskQueueUtils;
import javax.inject.Inject;
import org.joda.time.DateTime;
/**
* Action that saves commit log checkpoints to Datastore and kicks off a diff export task.
*
* <p>We separate computing and saving the checkpoint from exporting it because the export to GCS is
* retryable but should not require the computation of a new checkpoint. Saving the checkpoint and
* enqueuing the export task are done transactionally, so any checkpoint that is saved will be
* exported to GCS very soon.
*
* <p>This action's supported method is GET rather than POST because it gets invoked via cron.
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/cron/commitLogCheckpoint",
method = Action.Method.GET,
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class CommitLogCheckpointAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final String QUEUE_NAME = "export-commits";
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy strategy;
@Inject TaskQueueUtils taskQueueUtils;
@Inject CommitLogCheckpointAction() {}
@Override
public void run() {
final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
logger.atInfo().log(
"Generated candidate checkpoint for time: %s", checkpoint.getCheckpointTime());
ofy()
.transact(
() -> {
DateTime lastWrittenTime = CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
logger.atInfo().log(
"Newer checkpoint already written at time: %s", lastWrittenTime);
return;
}
ofy()
.saveWithoutBackup()
.entities(
checkpoint, CommitLogCheckpointRoot.create(checkpoint.getCheckpointTime()));
// Enqueue a diff task between previous and current checkpoints.
taskQueueUtils.enqueue(
getQueue(QUEUE_NAME),
withUrl(ExportCommitLogDiffAction.PATH)
.param(LOWER_CHECKPOINT_TIME_PARAM, lastWrittenTime.toString())
.param(
UPPER_CHECKPOINT_TIME_PARAM, checkpoint.getCheckpointTime().toString()));
});
}
}


@@ -0,0 +1,169 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.transformValues;
import static google.registry.model.ofy.CommitLogBucket.getBucketKey;
import static google.registry.util.DateTimeUtils.END_OF_TIME;
import static google.registry.util.DateTimeUtils.earliestOf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.googlecode.objectify.Key;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.Ofy;
import google.registry.util.Clock;
import java.util.List;
import java.util.Map.Entry;
import javax.inject.Inject;
import org.joda.time.DateTime;
/**
* Implementation of the procedure for determining a point-in-time consistent commit log checkpoint.
*
* <p>This algorithm examines the recently written commit log data and uses a dual-read approach
* to determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
* "consistent" we mean, generally speaking, that if the Datastore were restored by replaying all
* the commit logs up to the checkpoint times of the buckets, the result would be transactionally
* correct; there must be no "holes" where restored state depends on non-restored state.
*
* <p>The consistency guarantee really has two parts, only one of which is provided by this
* algorithm. The procedure below guarantees only that if the resulting checkpoint includes any
* given commit log, it will also include all the commit logs that were both 1) actually written
* before that commit log "in real life", and 2) have an earlier timestamp than that commit log.
* (These criteria do not necessarily imply each other, due to the lack of a global shared clock.)
* The rest of the guarantee comes from our Ofy customizations, which ensure that any transaction
* that depends on state from a previous transaction does indeed have a later timestamp.
*
* <h2>Procedure description</h2>
* <pre>
* {@code
* ComputeCheckpoint() -> returns a set consisting of a timestamp c(b_i) for every bucket b_i
*
* 1) read off the latest commit timestamp t(b_i) for every bucket b_i
* 2) iterate over the buckets b_i a second time, and
* a) do a consistent query for the next commit timestamp t'(b_i) where t'(b_i) > t(b_i)
* b) if present, add this timestamp t'(b_i) to a set S
* 3) compute a threshold time T* representing a time before all commits in S, as follows:
* a) if S is empty, let T* = +∞ (or the "end of time")
* b) else, let T* = T - Δ, for T = min(S) and some small Δ > 0
* 4) return the set given by: min(t(b_i), T*) for all b_i
* }
* </pre>
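*
* <p>Worked example (hypothetical values): suppose two buckets with first-pass times t(b_1) = 10
* and t(b_2) = 12, and the second pass finds one new commit at t'(b_1) = 11, so S = {11}. Then
* T* = 11 - Δ (say 10.999), and the checkpoint times are c(b_1) = min(10, T*) = 10 and
* c(b_2) = min(12, T*) = 10.999, which correctly excludes the commit at time 11.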
*
* <h2>Correctness proof of algorithm</h2>
*
* <p>{@literal
* As described above, the algorithm is correct as long as it can ensure the following: given a
* commit log X written at time t(X) to bucket b_x, and another commit log Y that was written "in
* real life" before X and for which t(Y) < t(X), then if X is included in the checkpoint, so is Y;
* that is, t(X) <= c(b_x) implies t(Y) <= c(b_y).
* }
*
* <p>{@literal
* To prove this, first note that we always have c(b_i) <= t(b_i) for every b_i, i.e. every commit
* log included in the checkpoint must have been seen in the first pass. Hence if X was included,
* then X must have been written by the time we started the second pass. But since Y was written
* "in real life" prior to X, we must have seen Y by the second pass too.
* }
*
* <p>{@literal
* Now assume towards a contradiction that X is indeed included but Y is not, i.e. that we have
* t(X) <= c(b_x) but t(Y) > c(b_y). If Y was seen in the first pass, i.e. t(Y) <= t(b_y), then by
* our assumption c(b_y) < t(Y) <= t(b_y), and therefore c(b_y) != t(b_y). By the definition of
* c(b_y) it must then equal T*, so we have T* < t(Y). However, this is a contradiction since
* t(Y) < t(X) and t(X) <= c(b_x) <= T*. If instead Y was seen in the second pass but not the
* first, t'(b_y) exists and we must have t'(b_y) <= t(Y), but then since T* < T <= t'(b_y) by
* definition, we again reach the contradiction T* < t(Y).
* }
*/
class CommitLogCheckpointStrategy {
@Inject Ofy ofy;
@Inject Clock clock;
@Inject CommitLogCheckpointStrategy() {}
/** Compute and return a new CommitLogCheckpoint for the current point in time. */
public CommitLogCheckpoint computeCheckpoint() {
DateTime checkpointTime = clock.nowUtc();
ImmutableMap<Integer, DateTime> firstPassTimes = readBucketTimestamps();
DateTime threshold = readNewCommitLogsAndFindThreshold(firstPassTimes);
return CommitLogCheckpoint.create(
checkpointTime,
computeBucketCheckpointTimes(firstPassTimes, threshold));
}
/**
* Returns a map from all bucket IDs to their current last written time values, fetched without
* a transaction so with no guarantee of consistency across buckets.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> readBucketTimestamps() {
// Use a fresh session cache so that we get the latest data from Datastore.
return ofy.doWithFreshSessionCache(
() ->
CommitLogBucket.loadAllBuckets()
.stream()
.collect(
ImmutableMap.toImmutableMap(
CommitLogBucket::getBucketNum, CommitLogBucket::getLastWrittenTime)));
}
/**
* Returns a threshold value defined as the latest timestamp that is before all new commit logs,
* where "new" means having a commit time after the per-bucket timestamp in the given map.
* When no such commit logs exist, the threshold value is set to END_OF_TIME.
*/
@VisibleForTesting
DateTime readNewCommitLogsAndFindThreshold(ImmutableMap<Integer, DateTime> bucketTimes) {
DateTime timeBeforeAllNewCommits = END_OF_TIME;
for (Entry<Integer, DateTime> entry : bucketTimes.entrySet()) {
Key<CommitLogBucket> bucketKey = getBucketKey(entry.getKey());
DateTime bucketTime = entry.getValue();
// Add 1 to handle START_OF_TIME since 0 isn't a valid id - filter then uses >= instead of >.
Key<CommitLogManifest> keyForFilter =
Key.create(CommitLogManifest.create(bucketKey, bucketTime.plusMillis(1), null));
List<Key<CommitLogManifest>> manifestKeys =
ofy.load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", keyForFilter)
.limit(1)
.keys()
.list();
if (!manifestKeys.isEmpty()) {
timeBeforeAllNewCommits = earliestOf(
timeBeforeAllNewCommits,
CommitLogManifest.extractCommitTime(getOnlyElement(manifestKeys)).minusMillis(1));
}
}
return timeBeforeAllNewCommits;
}
/**
* Returns the bucket checkpoint times produced by clamping the given set of bucket timestamps to
* at most the given threshold value.
*/
@VisibleForTesting
ImmutableMap<Integer, DateTime> computeBucketCheckpointTimes(
ImmutableMap<Integer, DateTime> firstPassTimes,
final DateTime threshold) {
return ImmutableMap.copyOf(
transformValues(firstPassTimes, firstPassTime -> earliestOf(firstPassTime, threshold)));
}
}


@@ -0,0 +1,327 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static java.lang.Boolean.FALSE;
import static java.lang.Boolean.TRUE;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.CommitLogManifestInput;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.EppResource;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.model.translators.CommitLogRevisionsTranslatorFactory;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Task that garbage collects old {@link CommitLogManifest} entities.
*
* <p>Once commit logs have been written to GCS, we don't really need them in Datastore anymore,
* except to reconstruct point-in-time snapshots of the database. To make that possible, {@link
* EppResource}s have a {@link EppResource#getRevisions} method that returns the commit logs for
* older points in time. But that functionality is not useful after a certain amount of time, e.g.
* thirty days, so unneeded revisions are deleted (see {@link CommitLogRevisionsTranslatorFactory}).
* This leaves commit logs in the system that are unneeded (have no revisions pointing to them). So
* this task runs periodically to delete the "orphan" commit logs.
*
* <p>This action runs a mapreduce that goes over all existing {@link EppResource} entities and
* all {@link CommitLogManifest} entities older than commitLogDatastoreRetention, and erases the
* commit logs that aren't referenced by any EppResource.
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/deleteOldCommitLogs",
auth = Auth.AUTH_INTERNAL_ONLY)
public final class DeleteOldCommitLogsAction implements Runnable {
private static final int NUM_MAP_SHARDS = 20;
private static final int NUM_REDUCE_SHARDS = 10;
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject Clock clock;
@Inject @Config("commitLogDatastoreRetention") Duration maxAge;
@Inject @Parameter(PARAM_DRY_RUN) boolean isDryRun;
@Inject DeleteOldCommitLogsAction() {}
@Override
public void run() {
DateTime deletionThreshold = clock.nowUtc().minus(maxAge);
logger.atInfo().log(
"Processing asynchronous deletion of unreferenced CommitLogManifests older than %s",
deletionThreshold);
mrRunner
.setJobName("Delete old commit logs")
.setModuleName("backend")
.setDefaultMapShards(NUM_MAP_SHARDS)
.setDefaultReduceShards(NUM_REDUCE_SHARDS)
.runMapreduce(
new DeleteOldCommitLogsMapper(deletionThreshold),
new DeleteOldCommitLogsReducer(deletionThreshold, isDryRun),
ImmutableList.of(
new CommitLogManifestInput(deletionThreshold),
EppResourceInputs.createKeyInput(EppResource.class)))
.sendLinkToMapreduceConsole(response);
}
/**
* A mapper that iterates over all {@link EppResource} and {@link CommitLogManifest} entities.
*
* <p>It emits the target key and {@code false} for all revisions of each EppResource (meaning
* "don't delete this"), and {@code true} for all old CommitLogManifests (meaning "delete this").
*
* <p>The reducer will then delete all CommitLogManifests that received only {@code true}.
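*
* <p>For a single manifest key K (hypothetical illustration): the manifest input emits (K, true)
* when K is older than the threshold, and every EppResource revision pointing at K emits
* (K, false); the reducer deletes K only if no {@code false} verdict was seen.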
*/
private static class DeleteOldCommitLogsMapper
extends Mapper<Key<?>, Key<CommitLogManifest>, Boolean> {
private static final long serialVersionUID = 8008689353479902948L;
private static final String KIND_MANIFEST = Key.getKind(CommitLogManifest.class);
private final DateTime threshold;
DeleteOldCommitLogsMapper(DateTime threshold) {
this.threshold = threshold;
}
@Override
public void map(final Key<?> key) {
// key is either a Key<CommitLogManifest> or a Key<? extends EppResource>.
//
// If it's a CommitLogManifest we just emit it as is (no need to load it).
if (key.getKind().equals(KIND_MANIFEST)) {
getContext().incrementCounter("old commit log manifests found");
// safe because we checked getKind
@SuppressWarnings("unchecked")
Key<CommitLogManifest> manifestKey = (Key<CommitLogManifest>) key;
emit(manifestKey, true);
return;
}
// If it isn't a Key<CommitLogManifest> then it should be an EppResource, which we need to
// load to emit the revisions.
//
Object object = ofy().load().key(key).now();
checkNotNull(object, "Received a key to a missing object. key: %s", key);
checkState(
object instanceof EppResource,
"Received a key to an object that isn't EppResource nor CommitLogManifest."
+ " Key: %s object type: %s",
key,
object.getClass().getName());
getContext().incrementCounter("EPP resources found");
EppResource eppResource = (EppResource) object;
if (eppResource.getCreationTime().isAfter(threshold)) {
getContext().incrementCounter("EPP resources newer than threshold");
}
for (Key<CommitLogManifest> manifestKey : eppResource.getRevisions().values()) {
emit(manifestKey, false);
}
getContext()
.incrementCounter("EPP resource revisions found", eppResource.getRevisions().size());
checkAndLogRevisionCoverageError(eppResource);
}
/**
* Checks whether the given eppResource has the required revisions.
*
* <p>Revisions are used to recreate the state of the resource at any given day within the past
* "commitLogDatastoreRetention" period. To do that, we need at least one revision that's older
* than this duration (i.e. dated before "threshold"), or at least one revision within a day of
* the resource's creation if it was created after the threshold.
*
* <p>Here we check that the given eppResource has the revisions it needs.
*
* <p>This is just a sanity check, since we rely on the revisions being correct for the deletion
* to work; we want to be alerted to any problems we find in the revisions.
*
* <p>This really checks {@link CommitLogRevisionsTranslatorFactory#transformBeforeSave}.
* There's nothing we can do at this point to prevent the damage - we only report on it.
*/
private void checkAndLogRevisionCoverageError(EppResource eppResource) {
// First - check if there even are revisions
if (eppResource.getRevisions().isEmpty()) {
getContext().incrementCounter("EPP resources missing all revisions (SEE LOGS)");
logger.atSevere().log("EPP resource missing all revisions: %s", Key.create(eppResource));
return;
}
// Next, check if there's a revision that's older than "CommitLogDatastoreRetention". There
// should have been at least one at the time this resource was saved.
//
// Alternatively, if the resource is newer than the threshold - there should be at least one
// revision within a day of the creation time.
DateTime oldestRevisionDate = eppResource.getRevisions().firstKey();
if (oldestRevisionDate.isBefore(threshold)
|| oldestRevisionDate.isBefore(eppResource.getCreationTime().plusDays(1))) {
// We're OK!
return;
}
// The oldest revision date is newer than the threshold! This shouldn't happen.
getContext().incrementCounter("EPP resources missing pre-threshold revision (SEE LOGS)");
logger.atSevere().log(
"EPP resource missing old enough revision: "
+ "%s (created on %s) has %d revisions between %s and %s, while threshold is %s",
Key.create(eppResource),
eppResource.getCreationTime(),
eppResource.getRevisions().size(),
eppResource.getRevisions().firstKey(),
eppResource.getRevisions().lastKey(),
threshold);
// We want to see how bad it is though: if the difference is less than a day then this might
// still be OK (we only need logs for the end of the day). But if it's more than a day, then
// we are 100% sure we can't recreate all the history we need from the revisions.
Duration interval = new Duration(threshold, oldestRevisionDate);
if (interval.isLongerThan(Duration.standardDays(1))) {
getContext()
.incrementCounter("EPP resources missing pre-(threshold+1d) revision (SEE LOGS)");
}
}
}
/**
* Reducer that deletes unreferenced {@link CommitLogManifest} + child {@link CommitLogMutation}.
*
* <p>It receives the manifestKey to possibly delete, and a list of boolean 'verdicts' from
* various sources (the "old manifests" source and the "still referenced" source) on whether it's
* OK to delete this manifestKey. If even one source returns "false" (meaning "it's not OK to
* delete this manifest") then it won't be deleted.
*/
static class DeleteOldCommitLogsReducer
extends Reducer<Key<CommitLogManifest>, Boolean, Void> {
private static final long serialVersionUID = -4918760187627937268L;
private final DateTime deletionThreshold;
private final boolean isDryRun;
@AutoValue
abstract static class DeletionResult {
enum Status {
ALREADY_DELETED,
AFTER_THRESHOLD,
SUCCESS
}
public abstract Status status();
public abstract int numDeleted();
static DeletionResult create(Status status, int numDeleted) {
return
new AutoValue_DeleteOldCommitLogsAction_DeleteOldCommitLogsReducer_DeletionResult(
status, numDeleted);
}
}
DeleteOldCommitLogsReducer(DateTime deletionThreshold, boolean isDryRun) {
this.deletionThreshold = deletionThreshold;
this.isDryRun = isDryRun;
}
@Override
public void reduce(
final Key<CommitLogManifest> manifestKey,
ReducerInput<Boolean> canDeleteVerdicts) {
ImmutableMultiset<Boolean> canDeleteMultiset = ImmutableMultiset.copyOf(canDeleteVerdicts);
if (canDeleteMultiset.count(TRUE) > 1) {
getContext().incrementCounter("commit log manifests incorrectly mapped multiple times");
}
if (canDeleteMultiset.count(FALSE) > 1) {
getContext().incrementCounter("commit log manifests referenced multiple times");
}
if (canDeleteMultiset.contains(FALSE)) {
getContext().incrementCounter(
canDeleteMultiset.contains(TRUE)
? "old commit log manifests still referenced"
: "new (or nonexistent) commit log manifests referenced");
getContext().incrementCounter(
"EPP resource revisions handled",
canDeleteMultiset.count(FALSE));
return;
}
DeletionResult deletionResult = ofy().transactNew(() -> {
CommitLogManifest manifest = ofy().load().key(manifestKey).now();
// It is possible that the same manifestKey was run twice, if a shard had to be restarted
// or some weird failure. If this happens, we want to exit immediately.
// Note that this can never happen in dryRun.
if (manifest == null) {
return DeletionResult.create(DeletionResult.Status.ALREADY_DELETED, 0);
}
// Doing a sanity check on the date. This is the only place we use the CommitLogManifest,
// so maybe removing this test will improve performance. However, unless it's proven that
// the performance boost is significant (and we've tested this enough to be sure it never
// happens), the safety of "let's not delete stuff we need from prod" is more important.
if (manifest.getCommitTime().isAfter(deletionThreshold)) {
return DeletionResult.create(DeletionResult.Status.AFTER_THRESHOLD, 0);
}
Iterable<Key<CommitLogMutation>> commitLogMutationKeys = ofy().load()
.type(CommitLogMutation.class)
.ancestor(manifestKey)
.keys()
.iterable();
ImmutableList<Key<?>> keysToDelete = ImmutableList.<Key<?>>builder()
.addAll(commitLogMutationKeys)
.add(manifestKey)
.build();
// Normally in a dry run we would log the entities that would be deleted, but those can
// number in the millions so we skip the logging.
if (!isDryRun) {
ofy().deleteWithoutBackup().keys(keysToDelete);
}
return DeletionResult.create(DeletionResult.Status.SUCCESS, keysToDelete.size());
});
switch (deletionResult.status()) {
case SUCCESS:
getContext().incrementCounter("old commit log manifests deleted");
getContext().incrementCounter("total entities deleted", deletionResult.numDeleted());
break;
case ALREADY_DELETED:
getContext().incrementCounter("attempts to delete an already deleted manifest");
break;
case AFTER_THRESHOLD:
logger.atSevere().log(
"Won't delete CommitLogManifest %s that is too recent.", manifestKey);
getContext().incrementCounter("manifests incorrectly assigned for deletion (SEE LOGS)");
break;
}
}
}
}


@@ -0,0 +1,217 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Lists.partition;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.NUM_TRANSACTIONS;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.UPPER_BOUND_CHECKPOINT;
import static google.registry.backup.BackupUtils.serializeEntity;
import static google.registry.model.ofy.CommitLogBucket.getBucketKey;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.isAtOrAfter;
import static java.nio.channels.Channels.newOutputStream;
import static java.util.Comparator.comparingLong;
import com.google.appengine.tools.cloudstorage.GcsFileOptions;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.model.ImmutableObject;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.joda.time.DateTime;
/** Action that exports the diff between two commit log checkpoints to GCS. */
@Action(
service = Action.Service.BACKEND,
path = ExportCommitLogDiffAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class ExportCommitLogDiffAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
static final String PATH = "/_dr/task/exportCommitLogDiff";
static final String UPPER_CHECKPOINT_TIME_PARAM = "upperCheckpointTime";
static final String LOWER_CHECKPOINT_TIME_PARAM = "lowerCheckpointTime";
public static final String DIFF_FILE_PREFIX = "commit_diff_until_";
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Config("commitLogDiffExportBatchSize") int batchSize;
@Inject @Parameter(LOWER_CHECKPOINT_TIME_PARAM) DateTime lowerCheckpointTime;
@Inject @Parameter(UPPER_CHECKPOINT_TIME_PARAM) DateTime upperCheckpointTime;
@Inject ExportCommitLogDiffAction() {}
@Override
public void run() {
logger.atInfo().log(
"Exporting commit log diffs between %s and %s.", lowerCheckpointTime, upperCheckpointTime);
checkArgument(isAtOrAfter(lowerCheckpointTime, START_OF_TIME));
checkArgument(lowerCheckpointTime.isBefore(upperCheckpointTime));
// Load the boundary checkpoints - lower is exclusive and may not exist (on the first export,
// when lowerCheckpointTime is START_OF_TIME), whereas the upper is inclusive and must exist.
CommitLogCheckpoint lowerCheckpoint = lowerCheckpointTime.isAfter(START_OF_TIME)
? verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(lowerCheckpointTime)).now())
: null;
CommitLogCheckpoint upperCheckpoint =
verifyNotNull(ofy().load().key(CommitLogCheckpoint.createKey(upperCheckpointTime)).now());
// Load the keys of all the manifests to include in this diff.
List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
logger.atInfo().log("Found %d manifests to export", sortedKeys.size());
// Open an output channel to GCS, wrapped in a stream for convenience.
try (OutputStream gcsStream = newOutputStream(gcsService.createOrReplace(
new GcsFilename(gcsBucket, DIFF_FILE_PREFIX + upperCheckpointTime),
new GcsFileOptions.Builder()
.addUserMetadata(LOWER_BOUND_CHECKPOINT, lowerCheckpointTime.toString())
.addUserMetadata(UPPER_BOUND_CHECKPOINT, upperCheckpointTime.toString())
.addUserMetadata(NUM_TRANSACTIONS, Integer.toString(sortedKeys.size()))
.build()))) {
// Export the upper checkpoint itself.
serializeEntity(upperCheckpoint, gcsStream);
// If there are no manifests to export, stop early, now that we've written out the file with
// the checkpoint itself (which is needed for restores, even if it's empty).
if (sortedKeys.isEmpty()) {
return;
}
// Export to GCS in chunks, one per fixed batch of commit logs. While processing one batch,
// asynchronously load the entities for the next one.
List<List<Key<CommitLogManifest>>> keyChunks = partition(sortedKeys, batchSize);
// Objectify's map return type is asynchronous. Calling .values() will block until it loads.
Map<?, CommitLogManifest> nextChunkToExport = ofy().load().keys(keyChunks.get(0));
for (int i = 0; i < keyChunks.size(); i++) {
// Force the async load to finish.
Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
logger.atInfo().log("Loaded %d manifests", chunkValues.size());
// Since there is no hard bound on how much data this might be, take care not to let the
// Objectify session cache fill up and potentially run out of memory. This is the only safe
// point to do this since at this point there is no async load in progress.
ofy().clearSessionCache();
// Kick off the next async load, which can happen in parallel to the current GCS export.
if (i + 1 < keyChunks.size()) {
nextChunkToExport = ofy().load().keys(keyChunks.get(i + 1));
}
exportChunk(gcsStream, chunkValues);
logger.atInfo().log("Exported %d manifests", chunkValues.size());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
logger.atInfo().log("Exported %d manifests in total", sortedKeys.size());
}
/**
* Loads all the diff keys, sorted in a transaction-consistent chronological order.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
*/
private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
@Nullable final CommitLogCheckpoint lowerCheckpoint,
final CommitLogCheckpoint upperCheckpoint) {
// Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
// transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
// CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
// a deterministic order.
return upperCheckpoint
.getBucketTimestamps()
.keySet()
.stream()
.flatMap(
bucketNum ->
Streams.stream(loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum)))
.sorted(
comparingLong(Key<CommitLogManifest>::getId)
.thenComparingLong(a -> a.getParent().getId()))
.collect(toImmutableList());
}
/**
* Loads the diff keys for one bucket.
*
* @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
* @param upperCheckpoint inclusive upper bound on keys in this diff
* @param bucketNum the bucket to load diff keys from
*/
private Iterable<Key<CommitLogManifest>> loadDiffKeysFromBucket(
@Nullable CommitLogCheckpoint lowerCheckpoint,
CommitLogCheckpoint upperCheckpoint,
int bucketNum) {
// If no lower checkpoint exists, or if it exists but had no timestamp for this bucket number
// (because the bucket count was increased between these checkpoints), then use START_OF_TIME
// as the effective exclusive lower bound.
DateTime lowerCheckpointBucketTime =
firstNonNull(
(lowerCheckpoint == null) ? null : lowerCheckpoint.getBucketTimestamps().get(bucketNum),
START_OF_TIME);
// Since START_OF_TIME=0 is not a valid id in a key, add 1 to both bounds. Then instead of
// loading lowerBound < x <= upperBound, we can load lowerBound <= x < upperBound.
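    // For example, a lower checkpoint at t=100 and an upper checkpoint at t=200 for this bucket
    // become a query for key ids in [101, 201), i.e. exactly the manifests in (100, 200].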
DateTime lowerBound = lowerCheckpointBucketTime.plusMillis(1);
DateTime upperBound = upperCheckpoint.getBucketTimestamps().get(bucketNum).plusMillis(1);
// If the lower and upper bounds are equal, there can't be any results, so skip the query.
if (lowerBound.equals(upperBound)) {
return ImmutableSet.of();
}
Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
return ofy().load()
.type(CommitLogManifest.class)
.ancestor(bucketKey)
.filterKey(">=", CommitLogManifest.createKey(bucketKey, lowerBound))
.filterKey("<", CommitLogManifest.createKey(bucketKey, upperBound))
.keys();
}
  /** Writes a chunk's worth of manifests and associated mutations to GCS. */
private void exportChunk(OutputStream gcsStream, Collection<CommitLogManifest> chunk)
throws IOException {
    // Kick off async loads of the mutations for each manifest in the chunk.
ImmutableList.Builder<Iterable<? extends ImmutableObject>> entities =
new ImmutableList.Builder<>();
for (CommitLogManifest manifest : chunk) {
entities.add(ImmutableList.of(manifest));
entities.add(ofy().load().type(CommitLogMutation.class).ancestor(manifest));
}
for (ImmutableObject entity : concat(entities.build())) {
serializeEntity(entity, gcsStream);
}
}
}

View file

@ -0,0 +1,185 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkState;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static google.registry.backup.ExportCommitLogDiffAction.DIFF_FILE_PREFIX;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import static google.registry.util.DateTimeUtils.latestOf;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsFilename;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.appengine.tools.cloudstorage.ListItem;
import com.google.appengine.tools.cloudstorage.ListOptions;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import google.registry.backup.BackupModule.Backups;
import google.registry.config.RegistryConfig.Config;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.joda.time.DateTime;
/** Utility class to list commit log diff files stored on GCS. */
class GcsDiffFileLister {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject GcsService gcsService;
@Inject @Config("commitLogGcsBucket") String gcsBucket;
@Inject @Backups ListeningExecutorService executor;
@Inject GcsDiffFileLister() {}
  /**
   * Traverses the sequence of diff files backwards from {@code lastTime}, following each file's
   * lower-bound pointer, and inserts the file metadata into {@code sequence}. Returns true if a
   * complete sequence reaching back past {@code fromTime} was discovered, false if one or more
   * files are missing.
   */
private boolean constructDiffSequence(
Map<DateTime, ListenableFuture<GcsFileMetadata>> upperBoundTimesToMetadata,
DateTime fromTime,
DateTime lastTime,
TreeMap<DateTime, GcsFileMetadata> sequence) {
DateTime checkpointTime = lastTime;
while (isBeforeOrAt(fromTime, checkpointTime)) {
GcsFileMetadata metadata;
if (upperBoundTimesToMetadata.containsKey(checkpointTime)) {
metadata = Futures.getUnchecked(upperBoundTimesToMetadata.get(checkpointTime));
} else {
String filename = DIFF_FILE_PREFIX + checkpointTime;
logger.atInfo().log("Patching GCS list; discovered file: %s", filename);
metadata = getMetadata(filename);
// If we hit a gap, quit.
if (metadata == null) {
logger.atInfo().log(
"Gap discovered in sequence terminating at %s, missing file: %s",
sequence.lastKey(), filename);
logger.atInfo().log("Found sequence from %s to %s", checkpointTime, lastTime);
return false;
}
}
sequence.put(checkpointTime, metadata);
checkpointTime = getLowerBoundTime(metadata);
}
logger.atInfo().log("Found sequence from %s to %s", checkpointTime, lastTime);
return true;
}
ImmutableList<GcsFileMetadata> listDiffFiles(DateTime fromTime, @Nullable DateTime toTime) {
logger.atInfo().log("Requested restore from time: %s", fromTime);
if (toTime != null) {
logger.atInfo().log(" Until time: %s", toTime);
}
// List all of the diff files on GCS and build a map from each file's upper checkpoint time
// (extracted from the filename) to its asynchronously-loaded metadata, keeping only files with
// an upper checkpoint time > fromTime.
    TreeMap<DateTime, ListenableFuture<GcsFileMetadata>> upperBoundTimesToMetadata =
        new TreeMap<>();
Iterator<ListItem> listItems;
try {
// TODO(b/23554360): Use a smarter prefixing strategy to speed this up.
listItems = gcsService.list(
gcsBucket,
new ListOptions.Builder().setPrefix(DIFF_FILE_PREFIX).build());
} catch (IOException e) {
throw new RuntimeException(e);
}
DateTime lastUpperBoundTime = START_OF_TIME;
while (listItems.hasNext()) {
final String filename = listItems.next().getName();
DateTime upperBoundTime = DateTime.parse(filename.substring(DIFF_FILE_PREFIX.length()));
if (isInRange(upperBoundTime, fromTime, toTime)) {
upperBoundTimesToMetadata.put(upperBoundTime, executor.submit(() -> getMetadata(filename)));
lastUpperBoundTime = latestOf(upperBoundTime, lastUpperBoundTime);
}
}
if (upperBoundTimesToMetadata.isEmpty()) {
logger.atInfo().log("No files found");
return ImmutableList.of();
}
    // Reconstruct the sequence of files by traversing backwards from "lastUpperBoundTime" (i.e.
    // the last file that we found) and finding its previous file until we either run out of files
    // or get to one that precedes "fromTime".
//
// GCS file listing is eventually consistent, so it's possible that we are missing a file. The
// metadata of a file is sufficient to identify the preceding file, so if we start from the
// last file and work backwards we can verify that we have no holes in our chain (although we
// may be missing files at the end).
TreeMap<DateTime, GcsFileMetadata> sequence = new TreeMap<>();
logger.atInfo().log("Restoring until: %s", lastUpperBoundTime);
boolean inconsistentFileSet = !constructDiffSequence(
upperBoundTimesToMetadata, fromTime, lastUpperBoundTime, sequence);
// Verify that all of the elements in the original set are represented in the sequence. If we
// find anything that's not represented, construct a sequence for it.
boolean checkForMoreExtraDiffs = true; // Always loop at least once.
while (checkForMoreExtraDiffs) {
checkForMoreExtraDiffs = false;
for (DateTime key : upperBoundTimesToMetadata.descendingKeySet()) {
if (!isInRange(key, fromTime, toTime)) {
break;
}
if (!sequence.containsKey(key)) {
constructDiffSequence(upperBoundTimesToMetadata, fromTime, key, sequence);
checkForMoreExtraDiffs = true;
inconsistentFileSet = true;
break;
}
}
}
checkState(
!inconsistentFileSet,
"Unable to compute commit diff history, there are either gaps or forks in the history "
+ "file set. Check log for details.");
logger.atInfo().log(
"Actual restore from time: %s", getLowerBoundTime(sequence.firstEntry().getValue()));
logger.atInfo().log("Found %d files to restore", sequence.size());
return ImmutableList.copyOf(sequence.values());
}
  /**
   * Returns true if {@code time} is in the inclusive range [{@code start}, {@code end}].
   *
   * <p>If {@code end} is null, returns true if {@code time} is at or after {@code start}.
   */
private boolean isInRange(DateTime time, DateTime start, @Nullable DateTime end) {
return isBeforeOrAt(start, time) && (end == null || isBeforeOrAt(time, end));
}
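  // The lower-bound checkpoint time is written into each diff file's GCS user metadata when the
  // file is exported (see ExportCommitLogDiffAction), which is what makes the backwards walk
  // possible.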
private DateTime getLowerBoundTime(GcsFileMetadata metadata) {
return DateTime.parse(metadata.getOptions().getUserMetadata().get(LOWER_BOUND_CHECKPOINT));
}
private GcsFileMetadata getMetadata(String filename) {
try {
return gcsService.getMetadata(new GcsFilename(gcsBucket, filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}

View file

@ -0,0 +1,191 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterators.peekingIterator;
import static google.registry.backup.BackupUtils.createDeserializingIterator;
import static google.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.tools.cloudstorage.GcsFileMetadata;
import com.google.appengine.tools.cloudstorage.GcsService;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Result;
import com.googlecode.objectify.util.ResultNow;
import google.registry.config.RegistryEnvironment;
import google.registry.model.ImmutableObject;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogCheckpointRoot;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.Retrier;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
import javax.inject.Inject;
import org.joda.time.DateTime;
/** Restore Registry 2 commit logs from GCS to Datastore. */
@Action(
service = Action.Service.TOOLS,
path = RestoreCommitLogsAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
public class RestoreCommitLogsAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
  static final int BLOCK_SIZE = 1024 * 1024; // Buffer 1 MB at a time, for no particular reason.
public static final String PATH = "/_dr/task/restoreCommitLogs";
static final String DRY_RUN_PARAM = "dryRun";
static final String FROM_TIME_PARAM = "fromTime";
static final String TO_TIME_PARAM = "toTime";
@Inject GcsService gcsService;
@Inject @Parameter(DRY_RUN_PARAM) boolean dryRun;
@Inject @Parameter(FROM_TIME_PARAM) DateTime fromTime;
@Inject @Parameter(TO_TIME_PARAM) DateTime toTime;
@Inject DatastoreService datastoreService;
@Inject GcsDiffFileLister diffLister;
@Inject Retrier retrier;
@Inject RestoreCommitLogsAction() {}
@Override
public void run() {
checkArgument(
RegistryEnvironment.get() == RegistryEnvironment.ALPHA
|| RegistryEnvironment.get() == RegistryEnvironment.CRASH
|| RegistryEnvironment.get() == RegistryEnvironment.UNITTEST,
"DO NOT RUN ANYWHERE ELSE EXCEPT ALPHA, CRASH OR TESTS.");
if (dryRun) {
logger.atInfo().log("Running in dryRun mode");
}
List<GcsFileMetadata> diffFiles = diffLister.listDiffFiles(fromTime, toTime);
if (diffFiles.isEmpty()) {
logger.atInfo().log("Nothing to restore");
return;
}
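    // Track the latest commit timestamp seen for each bucket so that the CommitLogBucket entities
    // can be reconstructed at the end.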
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
CommitLogCheckpoint lastCheckpoint = null;
for (GcsFileMetadata metadata : diffFiles) {
logger.atInfo().log("Restoring: %s", metadata.getFilename().getObjectName());
try (InputStream input = Channels.newInputStream(
gcsService.openPrefetchingReadChannel(metadata.getFilename(), 0, BLOCK_SIZE))) {
PeekingIterator<ImmutableObject> commitLogs =
peekingIterator(createDeserializingIterator(input));
lastCheckpoint = (CommitLogCheckpoint) commitLogs.next();
saveOfy(ImmutableList.of(lastCheckpoint)); // Save the checkpoint itself.
while (commitLogs.hasNext()) {
CommitLogManifest manifest = restoreOneTransaction(commitLogs);
bucketTimestamps.put(manifest.getBucketId(), manifest.getCommitTime());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
// Restore the CommitLogCheckpointRoot and CommitLogBuckets.
saveOfy(
Streams.concat(
bucketTimestamps
.entrySet()
.stream()
.map(
entry ->
new CommitLogBucket.Builder()
.setBucketNum(entry.getKey())
.setLastWrittenTime(entry.getValue())
.build()),
Stream.of(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())))
.collect(toImmutableList()));
logger.atInfo().log("Restore complete");
}
/**
* Restore the contents of one transaction to Datastore.
*
* <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
* object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
* restore by deleting the deletes and recreating the saves from their proto form. We also save
* the commit logs themselves back to Datastore, so that the commit log system itself is
* transparently restored alongside the data.
*
* @return the manifest, for use in restoring the {@link CommitLogBucket}.
*/
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
Result<?> deleteResult = deleteAsync(manifest.getDeletions());
List<Entity> entitiesToSave = Lists.newArrayList(ofy().save().toEntity(manifest));
while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
entitiesToSave.add(ofy().save().toEntity(mutation));
entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
}
saveRaw(entitiesToSave);
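    // Force the async delete to finish; if it failed transiently, redo it with retries.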
try {
deleteResult.now();
} catch (Exception e) {
retrier.callWithRetry(
() -> deleteAsync(manifest.getDeletions()).now(), RuntimeException.class);
}
return manifest;
}
private void saveRaw(List<Entity> entitiesToSave) {
if (dryRun) {
logger.atInfo().log("Would have saved entities: %s", entitiesToSave);
return;
}
retrier.callWithRetry(() -> datastoreService.put(entitiesToSave), RuntimeException.class);
}
private void saveOfy(Iterable<? extends ImmutableObject> objectsToSave) {
if (dryRun) {
logger.atInfo().log("Would have saved entities: %s", objectsToSave);
return;
}
retrier.callWithRetry(
() -> ofy().saveWithoutBackup().entities(objectsToSave).now(), RuntimeException.class);
}
private Result<?> deleteAsync(Set<Key<?>> keysToDelete) {
if (dryRun) {
logger.atInfo().log("Would have deleted entities: %s", keysToDelete);
}
return dryRun || keysToDelete.isEmpty()
? new ResultNow<Void>(null)
: ofy().deleteWithoutBackup().keys(keysToDelete);
}
}

View file

@ -0,0 +1,16 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package google.registry.backup;

View file

@ -0,0 +1,167 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.common.base.Preconditions.checkArgument;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.eppcommon.Trid;
import google.registry.model.host.HostResource;
import google.registry.util.AppEngineServiceUtils;
import google.registry.util.Retrier;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/** Helper class to enqueue tasks for handling asynchronous operations in flows. */
public final class AsyncTaskEnqueuer {
/** The HTTP parameter names used by async flows. */
public static final String PARAM_RESOURCE_KEY = "resourceKey";
public static final String PARAM_REQUESTING_CLIENT_ID = "requestingClientId";
public static final String PARAM_CLIENT_TRANSACTION_ID = "clientTransactionId";
public static final String PARAM_SERVER_TRANSACTION_ID = "serverTransactionId";
public static final String PARAM_IS_SUPERUSER = "isSuperuser";
public static final String PARAM_HOST_KEY = "hostKey";
public static final String PARAM_REQUESTED_TIME = "requestedTime";
public static final String PARAM_RESAVE_TIMES = "resaveTimes";
/** The task queue names used by async flows. */
public static final String QUEUE_ASYNC_ACTIONS = "async-actions";
public static final String QUEUE_ASYNC_DELETE = "async-delete-pull";
public static final String QUEUE_ASYNC_HOST_RENAME = "async-host-rename-pull";
public static final String PATH_RESAVE_ENTITY = "/_dr/task/resaveEntity";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
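  // 30 days matches the maximum ETA that App Engine task queues allow for a task.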
private static final Duration MAX_ASYNC_ETA = Duration.standardDays(30);
private final Duration asyncDeleteDelay;
private final Queue asyncActionsPushQueue;
private final Queue asyncDeletePullQueue;
private final Queue asyncDnsRefreshPullQueue;
private final AppEngineServiceUtils appEngineServiceUtils;
private final Retrier retrier;
@Inject
public AsyncTaskEnqueuer(
@Named(QUEUE_ASYNC_ACTIONS) Queue asyncActionsPushQueue,
@Named(QUEUE_ASYNC_DELETE) Queue asyncDeletePullQueue,
@Named(QUEUE_ASYNC_HOST_RENAME) Queue asyncDnsRefreshPullQueue,
@Config("asyncDeleteFlowMapreduceDelay") Duration asyncDeleteDelay,
AppEngineServiceUtils appEngineServiceUtils,
Retrier retrier) {
this.asyncActionsPushQueue = asyncActionsPushQueue;
this.asyncDeletePullQueue = asyncDeletePullQueue;
this.asyncDnsRefreshPullQueue = asyncDnsRefreshPullQueue;
this.asyncDeleteDelay = asyncDeleteDelay;
this.appEngineServiceUtils = appEngineServiceUtils;
this.retrier = retrier;
}
/** Enqueues a task to asynchronously re-save an entity at some point in the future. */
public void enqueueAsyncResave(
ImmutableObject entityToResave, DateTime now, DateTime whenToResave) {
enqueueAsyncResave(entityToResave, now, ImmutableSortedSet.of(whenToResave));
}
/**
* Enqueues a task to asynchronously re-save an entity at some point(s) in the future.
*
* <p>Multiple re-save times are chained one after the other, i.e. any given run will re-enqueue
* itself to run at the next time if there are remaining re-saves scheduled.
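   *
   * <p>For example, a call with re-save times {t1, t2, t3} enqueues a single task with an ETA of
   * t1 and a resaveTimes parameter of "t2,t3"; the run at t1 then re-enqueues itself for t2, and
   * so on.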
*/
public void enqueueAsyncResave(
ImmutableObject entityToResave, DateTime now, ImmutableSortedSet<DateTime> whenToResave) {
DateTime firstResave = whenToResave.first();
checkArgument(isBeforeOrAt(now, firstResave), "Can't enqueue a resave to run in the past");
Key<ImmutableObject> entityKey = Key.create(entityToResave);
Duration etaDuration = new Duration(now, firstResave);
if (etaDuration.isLongerThan(MAX_ASYNC_ETA)) {
logger.atInfo().log(
"Ignoring async re-save of %s; %s is past the ETA threshold of %s.",
entityKey, firstResave, MAX_ASYNC_ETA);
return;
}
logger.atInfo().log("Enqueuing async re-save of %s to run at %s.", entityKey, whenToResave);
String backendHostname = appEngineServiceUtils.getServiceHostname("backend");
TaskOptions task =
TaskOptions.Builder.withUrl(PATH_RESAVE_ENTITY)
.method(Method.POST)
.header("Host", backendHostname)
.countdownMillis(etaDuration.getMillis())
.param(PARAM_RESOURCE_KEY, entityKey.getString())
.param(PARAM_REQUESTED_TIME, now.toString());
if (whenToResave.size() > 1) {
task.param(PARAM_RESAVE_TIMES, Joiner.on(',').join(whenToResave.tailSet(firstResave, false)));
}
addTaskToQueueWithRetry(asyncActionsPushQueue, task);
}
/** Enqueues a task to asynchronously delete a contact or host, by key. */
public void enqueueAsyncDelete(
EppResource resourceToDelete,
DateTime now,
String requestingClientId,
Trid trid,
boolean isSuperuser) {
Key<EppResource> resourceKey = Key.create(resourceToDelete);
logger.atInfo().log(
"Enqueuing async deletion of %s on behalf of registrar %s.",
resourceKey, requestingClientId);
TaskOptions task =
TaskOptions.Builder.withMethod(Method.PULL)
.countdownMillis(asyncDeleteDelay.getMillis())
.param(PARAM_RESOURCE_KEY, resourceKey.getString())
.param(PARAM_REQUESTING_CLIENT_ID, requestingClientId)
.param(PARAM_SERVER_TRANSACTION_ID, trid.getServerTransactionId())
.param(PARAM_IS_SUPERUSER, Boolean.toString(isSuperuser))
.param(PARAM_REQUESTED_TIME, now.toString());
trid.getClientTransactionId()
.ifPresent(clTrid -> task.param(PARAM_CLIENT_TRANSACTION_ID, clTrid));
addTaskToQueueWithRetry(asyncDeletePullQueue, task);
}
/** Enqueues a task to asynchronously refresh DNS for a renamed host. */
public void enqueueAsyncDnsRefresh(HostResource host, DateTime now) {
Key<HostResource> hostKey = Key.create(host);
logger.atInfo().log("Enqueuing async DNS refresh for renamed host %s.", hostKey);
addTaskToQueueWithRetry(
asyncDnsRefreshPullQueue,
TaskOptions.Builder.withMethod(Method.PULL)
.param(PARAM_HOST_KEY, hostKey.getString())
.param(PARAM_REQUESTED_TIME, now.toString()));
}
/**
* Adds a task to a queue with retrying, to avoid aborting the entire flow over a transient issue
* enqueuing a task.
*/
private void addTaskToQueueWithRetry(final Queue queue, final TaskOptions task) {
retrier.callWithRetry(() -> queue.add(task), TransientFailureException.class);
}
}

View file

@ -0,0 +1,165 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.appengine.api.taskqueue.QueueConstants.maxLeaseCount;
import static com.google.monitoring.metrics.EventMetric.DEFAULT_FITTER;
import static google.registry.batch.AsyncTaskMetrics.OperationType.CONTACT_AND_HOST_DELETE;
import static google.registry.batch.AsyncTaskMetrics.OperationType.DNS_REFRESH;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.google.monitoring.metrics.DistributionFitter;
import com.google.monitoring.metrics.EventMetric;
import com.google.monitoring.metrics.FibonacciFitter;
import com.google.monitoring.metrics.IncrementableMetric;
import com.google.monitoring.metrics.LabelDescriptor;
import com.google.monitoring.metrics.MetricRegistryImpl;
import google.registry.util.Clock;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Instrumentation for async flows (contact/host deletion and DNS refreshes).
*
* @see AsyncTaskEnqueuer
*/
public class AsyncTaskMetrics {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Clock clock;
@Inject
public AsyncTaskMetrics(Clock clock) {
this.clock = clock;
}
/**
* A Fibonacci fitter used for bucketing the batch count.
*
   * <p>We use a Fibonacci fitter because it provides better resolution at the low end than an
* exponential fitter, which is important because most batch sizes are likely to be very low,
* despite going up to 1,000 on the high end. Also, the precision is better, as batch size is
* inherently an integer, whereas an exponential fitter with an exponent base less than 2 would
* have unintuitive boundaries.
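   *
   * <p>For example, given the 1,000-task maximum, the bucket boundaries fall at the Fibonacci
   * numbers 0, 1, 2, 3, 5, 8, ... up through the largest one not exceeding maxLeaseCount().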
*/
private static final DistributionFitter FITTER_BATCH_SIZE =
FibonacciFitter.create(maxLeaseCount());
private static final ImmutableSet<LabelDescriptor> LABEL_DESCRIPTORS =
ImmutableSet.of(
LabelDescriptor.create("operation_type", "The type of async flow operation."),
LabelDescriptor.create("result", "The result of the async flow operation."));
@VisibleForTesting
static final IncrementableMetric asyncFlowOperationCounts =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/async_flows/operations",
"Count of Async Flow Operations",
"count",
LABEL_DESCRIPTORS);
@VisibleForTesting
static final EventMetric asyncFlowOperationProcessingTime =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/async_flows/processing_time",
"Async Flow Processing Time",
"milliseconds",
LABEL_DESCRIPTORS,
DEFAULT_FITTER);
@VisibleForTesting
static final EventMetric asyncFlowBatchSize =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/async_flows/batch_size",
"Async Operation Batch Size",
"batch size",
ImmutableSet.of(
LabelDescriptor.create("operation_type", "The type of async flow operation.")),
FITTER_BATCH_SIZE);
/** The type of asynchronous operation. */
public enum OperationType {
CONTACT_DELETE("contactDelete"),
HOST_DELETE("hostDelete"),
CONTACT_AND_HOST_DELETE("contactAndHostDelete"),
DNS_REFRESH("dnsRefresh");
private final String metricLabelValue;
OperationType(String metricLabelValue) {
this.metricLabelValue = metricLabelValue;
}
String getMetricLabelValue() {
return metricLabelValue;
}
}
/** The result of an asynchronous operation. */
public enum OperationResult {
/** The operation processed correctly and the result was success. */
SUCCESS("success"),
/** The operation processed correctly and the result was failure. */
FAILURE("failure"),
/** The operation did not process correctly due to some unexpected error. */
ERROR("error"),
/** The operation was skipped because the request is now stale. */
STALE("stale");
private final String metricLabelValue;
OperationResult(String metricLabelValue) {
this.metricLabelValue = metricLabelValue;
}
String getMetricLabelValue() {
return metricLabelValue;
}
}
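  /** Records the counter and processing-time metrics for one completed async flow operation. */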
public void recordAsyncFlowResult(
OperationType operationType, OperationResult operationResult, DateTime whenEnqueued) {
asyncFlowOperationCounts.increment(
operationType.getMetricLabelValue(), operationResult.getMetricLabelValue());
long processingMillis = new Duration(whenEnqueued, clock.nowUtc()).getMillis();
asyncFlowOperationProcessingTime.record(
processingMillis,
operationType.getMetricLabelValue(),
operationResult.getMetricLabelValue());
logger.atInfo().log(
"Asynchronous %s operation took %d ms to process, yielding result: %s.",
operationType.getMetricLabelValue(),
processingMillis,
operationResult.getMetricLabelValue());
}
public void recordContactHostDeletionBatchSize(long batchSize) {
asyncFlowBatchSize.record(batchSize, CONTACT_AND_HOST_DELETE.getMetricLabelValue());
}
public void recordDnsRefreshBatchSize(long batchSize) {
asyncFlowBatchSize.record(batchSize, DNS_REFRESH.getMetricLabelValue());
}
}

View file

@ -0,0 +1,38 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "batch",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/config",
"//java/google/registry/dns",
"//java/google/registry/mapreduce",
"//java/google/registry/mapreduce/inputs",
"//java/google/registry/model",
"//java/google/registry/pricing",
"//java/google/registry/request",
"//java/google/registry/request/auth",
"//java/google/registry/util",
"//third_party/objectify:objectify-v4_1",
"@com_google_appengine_api_1_0_sdk",
"@com_google_appengine_tools_appengine_gcs_client",
"@com_google_appengine_tools_appengine_mapreduce",
"@com_google_appengine_tools_appengine_pipeline",
"@com_google_auto_factory",
"@com_google_auto_value",
"@com_google_code_findbugs_jsr305",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@com_google_http_client",
"@com_google_monitoring_client_metrics",
"@javax_servlet_api",
"@joda_time",
"@org_joda_money",
],
)

View file

@ -0,0 +1,114 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_REQUESTED_TIME;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESAVE_TIMES;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_ACTIONS;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_DELETE;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_HOST_RENAME;
import static google.registry.request.RequestParameters.extractOptionalBooleanParameter;
import static google.registry.request.RequestParameters.extractOptionalIntParameter;
import static google.registry.request.RequestParameters.extractOptionalParameter;
import static google.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static google.registry.request.RequestParameters.extractRequiredParameter;
import static google.registry.request.RequestParameters.extractSetOfDatetimeParameters;
import com.google.appengine.api.taskqueue.Queue;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import dagger.Module;
import dagger.Provides;
import google.registry.model.ImmutableObject;
import google.registry.request.Parameter;
import java.util.Optional;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
import org.joda.time.DateTime;
/** Dagger module for injecting common settings for batch actions. */
@Module
public class BatchModule {
@Provides
@Parameter("jobName")
static Optional<String> provideJobName(HttpServletRequest req) {
return extractOptionalParameter(req, "jobName");
}
@Provides
@Parameter("jobId")
static Optional<String> provideJobId(HttpServletRequest req) {
return extractOptionalParameter(req, "jobId");
}
@Provides
@Parameter("numJobsToDelete")
static Optional<Integer> provideNumJobsToDelete(HttpServletRequest req) {
return extractOptionalIntParameter(req, "numJobsToDelete");
}
@Provides
@Parameter("daysOld")
static Optional<Integer> provideDaysOld(HttpServletRequest req) {
return extractOptionalIntParameter(req, "daysOld");
}
@Provides
@Parameter("force")
static Optional<Boolean> provideForce(HttpServletRequest req) {
return extractOptionalBooleanParameter(req, "force");
}
@Provides
@Parameter(PARAM_RESOURCE_KEY)
static Key<ImmutableObject> provideResourceKey(HttpServletRequest req) {
return Key.create(extractRequiredParameter(req, PARAM_RESOURCE_KEY));
}
@Provides
@Parameter(PARAM_REQUESTED_TIME)
static DateTime provideRequestedTime(HttpServletRequest req) {
return extractRequiredDatetimeParameter(req, PARAM_REQUESTED_TIME);
}
@Provides
@Parameter(PARAM_RESAVE_TIMES)
static ImmutableSet<DateTime> provideResaveTimes(HttpServletRequest req) {
return extractSetOfDatetimeParameters(req, PARAM_RESAVE_TIMES);
}
@Provides
@Named(QUEUE_ASYNC_ACTIONS)
static Queue provideAsyncActionsPushQueue() {
return getQueue(QUEUE_ASYNC_ACTIONS);
}
@Provides
@Named(QUEUE_ASYNC_DELETE)
static Queue provideAsyncDeletePullQueue() {
return getQueue(QUEUE_ASYNC_DELETE);
}
@Provides
@Named(QUEUE_ASYNC_HOST_RENAME)
static Queue provideAsyncHostRenamePullQueue() {
return getQueue(QUEUE_ASYNC_HOST_RENAME);
}
}

View file

@ -0,0 +1,610 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.appengine.api.taskqueue.QueueConstants.maxLeaseCount;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.math.IntMath.divide;
import static com.googlecode.objectify.Key.getKind;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_CLIENT_TRANSACTION_ID;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_IS_SUPERUSER;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_REQUESTED_TIME;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_REQUESTING_CLIENT_ID;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_SERVER_TRANSACTION_ID;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_DELETE;
import static google.registry.model.EppResourceUtils.isActive;
import static google.registry.model.EppResourceUtils.isDeleted;
import static google.registry.model.ResourceTransferUtils.denyPendingTransfer;
import static google.registry.model.ResourceTransferUtils.handlePendingTransferOnDelete;
import static google.registry.model.ResourceTransferUtils.updateForeignKeyIndexDeletionTime;
import static google.registry.model.eppcommon.StatusValue.PENDING_DELETE;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.model.reporting.HistoryEntry.Type.CONTACT_DELETE;
import static google.registry.model.reporting.HistoryEntry.Type.CONTACT_DELETE_FAILURE;
import static google.registry.model.reporting.HistoryEntry.Type.HOST_DELETE;
import static google.registry.model.reporting.HistoryEntry.Type.HOST_DELETE_FAILURE;
import static google.registry.model.transfer.TransferStatus.SERVER_CANCELLED;
import static java.math.RoundingMode.CEILING;
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.logging.Level.INFO;
import static java.util.logging.Level.SEVERE;
import static org.joda.time.Duration.standardHours;
import com.google.appengine.api.taskqueue.LeaseOptions;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.auto.value.AutoValue;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Multiset;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.batch.AsyncTaskMetrics.OperationResult;
import google.registry.batch.AsyncTaskMetrics.OperationType;
import google.registry.batch.DeleteContactsAndHostsAction.DeletionResult.Type;
import google.registry.dns.DnsQueue;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.UnlockerOutput;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.mapreduce.inputs.NullInput;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.ExternalMessagingName;
import google.registry.model.contact.ContactResource;
import google.registry.model.domain.DomainBase;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.eppcommon.Trid;
import google.registry.model.eppoutput.EppResponse.ResponseData;
import google.registry.model.host.HostResource;
import google.registry.model.poll.PendingActionNotificationResponse.ContactPendingActionNotificationResponse;
import google.registry.model.poll.PendingActionNotificationResponse.HostPendingActionNotificationResponse;
import google.registry.model.poll.PollMessage;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.server.Lock;
import google.registry.request.Action;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import google.registry.util.NonFinalForTesting;
import google.registry.util.RequestStatusChecker;
import google.registry.util.Retrier;
import google.registry.util.SystemClock;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* A mapreduce that processes batch asynchronous deletions of contact and host resources by mapping
* over all domains and checking for any references to the contacts/hosts in pending deletion.
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/deleteContactsAndHosts",
auth = Auth.AUTH_INTERNAL_ONLY)
public class DeleteContactsAndHostsAction implements Runnable {
static final String KIND_CONTACT = getKind(ContactResource.class);
static final String KIND_HOST = getKind(HostResource.class);
private static final Duration LEASE_LENGTH = standardHours(4);
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
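  // Aim for roughly DELETES_PER_SHARD deletion requests per reduce shard, capped at
  // MAX_REDUCE_SHARDS shards in total (see runMapreduce).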
private static final int MAX_REDUCE_SHARDS = 50;
private static final int DELETES_PER_SHARD = 5;
@Inject AsyncTaskMetrics asyncTaskMetrics;
@Inject Clock clock;
@Inject MapreduceRunner mrRunner;
@Inject @Named(QUEUE_ASYNC_DELETE) Queue queue;
@Inject RequestStatusChecker requestStatusChecker;
@Inject Response response;
@Inject Retrier retrier;
@Inject DeleteContactsAndHostsAction() {}
@Override
public void run() {
// Check if the lock can be acquired, and if not, a previous run of this mapreduce is still
// executing, so return early.
Optional<Lock> lock =
Lock.acquire(
DeleteContactsAndHostsAction.class.getSimpleName(),
null,
LEASE_LENGTH,
requestStatusChecker,
false);
if (!lock.isPresent()) {
logRespondAndUnlock(INFO, "Can't acquire lock; aborting.", lock);
return;
}
// Lease the async tasks to process.
LeaseOptions options =
LeaseOptions.Builder.withCountLimit(maxLeaseCount())
.leasePeriod(LEASE_LENGTH.getStandardSeconds(), SECONDS);
List<TaskHandle> tasks = queue.leaseTasks(options);
asyncTaskMetrics.recordContactHostDeletionBatchSize(tasks.size());
// Check if there are no tasks to process, and if so, return early.
if (tasks.isEmpty()) {
logRespondAndUnlock(INFO, "No contact/host deletion tasks in pull queue; finishing.", lock);
return;
}
Multiset<String> kindCounts = HashMultiset.create(2);
ImmutableList.Builder<DeletionRequest> builder = new ImmutableList.Builder<>();
ImmutableList.Builder<Key<? extends EppResource>> resourceKeys = new ImmutableList.Builder<>();
final List<DeletionRequest> requestsToDelete = new ArrayList<>();
for (TaskHandle task : tasks) {
try {
DeletionRequest deletionRequest = DeletionRequest.createFromTask(task, clock.nowUtc());
if (deletionRequest.isDeletionAllowed()) {
builder.add(deletionRequest);
resourceKeys.add(deletionRequest.key());
kindCounts.add(deletionRequest.key().getKind());
} else {
requestsToDelete.add(deletionRequest);
}
} catch (Exception e) {
logger.atSevere().withCause(e).log(
"Could not parse async deletion request, delaying task for a day: %s", task);
// Grab the lease for a whole day, so that it won't continue throwing errors every five
// minutes.
queue.modifyTaskLease(task, 1L, DAYS);
}
}
deleteStaleTasksWithRetry(requestsToDelete);
ImmutableList<DeletionRequest> deletionRequests = builder.build();
if (deletionRequests.isEmpty()) {
logRespondAndUnlock(
INFO, "No async deletions to process because all were already handled.", lock);
} else {
logger.atInfo().log(
"Processing asynchronous deletion of %d contacts and %d hosts: %s",
kindCounts.count(KIND_CONTACT), kindCounts.count(KIND_HOST), resourceKeys.build());
runMapreduce(deletionRequests, lock);
}
}
/**
* Deletes a list of tasks associated with deletion requests from the async delete queue using a
* retrier.
*/
private void deleteStaleTasksWithRetry(final List<DeletionRequest> deletionRequests) {
if (deletionRequests.isEmpty()) {
return;
}
final List<TaskHandle> tasks =
deletionRequests.stream().map(DeletionRequest::task).collect(toImmutableList());
retrier.callWithRetry(() -> queue.deleteTask(tasks), TransientFailureException.class);
deletionRequests.forEach(
deletionRequest ->
asyncTaskMetrics.recordAsyncFlowResult(
deletionRequest.getMetricOperationType(),
OperationResult.STALE,
deletionRequest.requestedTime()));
}
private void runMapreduce(ImmutableList<DeletionRequest> deletionRequests, Optional<Lock> lock) {
try {
int numReducers =
Math.min(MAX_REDUCE_SHARDS, divide(deletionRequests.size(), DELETES_PER_SHARD, CEILING));
mrRunner
.setJobName("Check for EPP resource references and then delete")
.setModuleName("backend")
.setDefaultReduceShards(numReducers)
.runMapreduce(
new DeleteContactsAndHostsMapper(deletionRequests),
new DeleteEppResourceReducer(),
ImmutableList.of(
// Add an extra shard that maps over a null domain. See the mapper code for why.
new NullInput<>(), EppResourceInputs.createEntityInput(DomainBase.class)),
new UnlockerOutput<Void>(lock.get()))
.sendLinkToMapreduceConsole(response);
} catch (Throwable t) {
logRespondAndUnlock(SEVERE, "Error starting mapreduce to delete contacts/hosts.", lock);
}
}
private void logRespondAndUnlock(Level level, String message, Optional<Lock> lock) {
logger.at(level).log(message);
response.setPayload(message);
lock.ifPresent(Lock::release);
}
  /**
   * A mapper that iterates over all {@link DomainBase} entities.
   *
   * <p>It emits the deletion request and {@code true} for each domain referencing the resource to
   * be deleted. For the special input of {@code null} it emits each deletion request and
   * {@code false}.
   */
public static class DeleteContactsAndHostsMapper
extends Mapper<DomainBase, DeletionRequest, Boolean> {
private static final long serialVersionUID = -253652818502690537L;
private final ImmutableList<DeletionRequest> deletionRequests;
DeleteContactsAndHostsMapper(ImmutableList<DeletionRequest> resourcesToDelete) {
this.deletionRequests = resourcesToDelete;
}
@Override
public void map(DomainBase domain) {
for (DeletionRequest deletionRequest : deletionRequests) {
if (domain == null) {
// The reducer only runs if at least one value is emitted. We add a null input to the
// mapreduce and emit one 'false' for each deletion request so that the reducer always
// runs for each requested deletion (so that it can finish up tasks if nothing else).
emit(deletionRequest, false);
} else if (isActive(domain, deletionRequest.lastUpdateTime())
&& isLinked(domain, deletionRequest.key())) {
emit(deletionRequest, true);
getContext()
.incrementCounter(
String.format("active Domain-%s links found", deletionRequest.key().getKind()));
}
}
if (domain != null) {
getContext().incrementCounter("domains processed");
}
}
/** Determine whether the target resource is a linked resource on the domain. */
private boolean isLinked(DomainBase domain, Key<? extends EppResource> resourceKey) {
if (resourceKey.getKind().equals(KIND_CONTACT)) {
return domain.getReferencedContacts().contains(resourceKey);
} else if (resourceKey.getKind().equals(KIND_HOST)) {
return domain.getNameservers().contains(resourceKey);
} else {
throw new IllegalStateException("EPP resource key of unknown type: " + resourceKey);
}
}
}
  /**
   * A reducer that checks whether the EPP resource to be deleted is still referenced anywhere,
   * deleting it if not and removing its pending-delete status if so.
   */
public static class DeleteEppResourceReducer
extends Reducer<DeletionRequest, Boolean, Void> {
private static final long serialVersionUID = 6569363449285506326L;
private static final DnsQueue dnsQueue = DnsQueue.create();
@NonFinalForTesting
private static AsyncTaskMetrics asyncTaskMetrics = new AsyncTaskMetrics(new SystemClock());
@Override
public void reduce(final DeletionRequest deletionRequest, ReducerInput<Boolean> values) {
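      // The mapper emits true once per domain that still references this resource, so the
      // resource is safe to delete iff no true value was emitted.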
final boolean hasNoActiveReferences = !Iterators.contains(values, true);
logger.atInfo().log("Processing async deletion request for %s", deletionRequest.key());
DeletionResult result =
ofy()
.transactNew(
() -> {
DeletionResult deletionResult =
attemptToDeleteResource(deletionRequest, hasNoActiveReferences);
getQueue(QUEUE_ASYNC_DELETE).deleteTask(deletionRequest.task());
return deletionResult;
});
asyncTaskMetrics.recordAsyncFlowResult(
deletionRequest.getMetricOperationType(),
result.getMetricOperationResult(),
deletionRequest.requestedTime());
String resourceNamePlural = deletionRequest.key().getKind() + "s";
getContext().incrementCounter(result.type().renderCounterText(resourceNamePlural));
logger.atInfo().log(
"Result of async deletion for resource %s: %s",
deletionRequest.key(), result.pollMessageText());
}
private DeletionResult attemptToDeleteResource(
DeletionRequest deletionRequest, boolean hasNoActiveReferences) {
DateTime now = ofy().getTransactionTime();
EppResource resource =
ofy().load().key(deletionRequest.key()).now().cloneProjectedAtTime(now);
// Double-check transactionally that the resource is still active and in PENDING_DELETE.
if (!doesResourceStateAllowDeletion(resource, now)) {
return DeletionResult.create(Type.ERRORED, "");
}
      // Contacts and external hosts have a direct client id. For subordinate hosts it needs to be
      // read off the superordinate domain.
String resourceClientId = resource.getPersistedCurrentSponsorClientId();
if (resource instanceof HostResource && ((HostResource) resource).isSubordinate()) {
resourceClientId =
ofy().load().key(((HostResource) resource).getSuperordinateDomain()).now()
.cloneProjectedAtTime(now)
.getCurrentSponsorClientId();
}
boolean requestedByCurrentOwner =
resourceClientId.equals(deletionRequest.requestingClientId());
boolean deleteAllowed =
hasNoActiveReferences && (requestedByCurrentOwner || deletionRequest.isSuperuser());
String resourceTypeName =
resource.getClass().getAnnotation(ExternalMessagingName.class).value();
String pollMessageText =
deleteAllowed
? String.format("Deleted %s %s.", resourceTypeName, resource.getForeignKey())
: String.format(
"Can't delete %s %s because %s.",
resourceTypeName,
resource.getForeignKey(),
requestedByCurrentOwner
? "it is referenced by a domain"
: "it was transferred prior to deletion");
HistoryEntry historyEntry =
new HistoryEntry.Builder()
.setClientId(deletionRequest.requestingClientId())
.setModificationTime(now)
.setType(getHistoryEntryType(resource, deleteAllowed))
.setParent(deletionRequest.key())
.build();
PollMessage.OneTime pollMessage =
new PollMessage.OneTime.Builder()
.setClientId(deletionRequest.requestingClientId())
.setMsg(pollMessageText)
.setParent(historyEntry)
.setEventTime(now)
.setResponseData(
getPollMessageResponseData(deletionRequest, resource, deleteAllowed, now))
.build();
EppResource resourceToSave;
if (deleteAllowed) {
EppResource.Builder<?, ?> resourceToSaveBuilder;
if (resource instanceof ContactResource) {
ContactResource contact = (ContactResource) resource;
// Handle pending transfers on contact deletion.
if (contact.getStatusValues().contains(StatusValue.PENDING_TRANSFER)) {
contact =
denyPendingTransfer(
contact, SERVER_CANCELLED, now, deletionRequest.requestingClientId());
}
// Wipe out PII on contact deletion.
resourceToSaveBuilder = contact.asBuilder().wipeOut();
} else {
resourceToSaveBuilder = resource.asBuilder();
}
resourceToSave = resourceToSaveBuilder.setDeletionTime(now).setStatusValues(null).build();
performDeleteTasks(resource, resourceToSave, now, historyEntry);
updateForeignKeyIndexDeletionTime(resourceToSave);
} else {
resourceToSave = resource.asBuilder().removeStatusValue(PENDING_DELETE).build();
}
ofy().save().<ImmutableObject>entities(resourceToSave, historyEntry, pollMessage);
return DeletionResult.create(
deleteAllowed ? Type.DELETED : Type.NOT_DELETED, pollMessageText);
}
private static ImmutableList<? extends ResponseData> getPollMessageResponseData(
DeletionRequest deletionRequest,
EppResource resource,
boolean deleteAllowed,
DateTime now) {
@Nullable String clientTransactionId = deletionRequest.clientTransactionId();
String serverTransactionId = deletionRequest.serverTransactionId();
Trid trid = Trid.create(clientTransactionId, serverTransactionId);
if (resource instanceof HostResource) {
return ImmutableList.of(
HostPendingActionNotificationResponse.create(
((HostResource) resource).getFullyQualifiedHostName(), deleteAllowed, trid, now));
} else if (resource instanceof ContactResource) {
return ImmutableList.of(
ContactPendingActionNotificationResponse.create(
((ContactResource) resource).getContactId(), deleteAllowed, trid, now));
} else {
throw new IllegalStateException("EPP resource of unknown type " + Key.create(resource));
}
}
    /**
     * Determines the proper history entry type for the delete operation, as a function of whether
     * the delete was successful.
     */
private HistoryEntry.Type getHistoryEntryType(EppResource resource, boolean successfulDelete) {
if (resource instanceof ContactResource) {
return successfulDelete ? CONTACT_DELETE : CONTACT_DELETE_FAILURE;
} else if (resource instanceof HostResource) {
return successfulDelete ? HOST_DELETE : HOST_DELETE_FAILURE;
} else {
throw new IllegalStateException("EPP resource of unknown type: " + Key.create(resource));
}
}
/** Perform any type-specific tasks on the resource to be deleted (and/or its dependencies). */
private void performDeleteTasks(
EppResource existingResource,
EppResource deletedResource,
DateTime deletionTime,
HistoryEntry historyEntryForDelete) {
if (existingResource instanceof ContactResource) {
handlePendingTransferOnDelete(
(ContactResource) existingResource,
(ContactResource) deletedResource,
deletionTime,
historyEntryForDelete);
} else if (existingResource instanceof HostResource) {
HostResource host = (HostResource) existingResource;
if (host.isSubordinate()) {
dnsQueue.addHostRefreshTask(host.getFullyQualifiedHostName());
ofy().save().entity(
ofy().load().key(host.getSuperordinateDomain()).now().asBuilder()
.removeSubordinateHost(host.getFullyQualifiedHostName())
.build());
}
} else {
throw new IllegalStateException(
"EPP resource of unknown type: " + Key.create(existingResource));
}
}
}
/** A class that encapsulates the values of a request to delete a contact or host. */
@AutoValue
abstract static class DeletionRequest implements Serializable {
private static final long serialVersionUID = -4612618525760839240L;
abstract Key<? extends EppResource> key();
abstract DateTime lastUpdateTime();
/**
* The client id of the registrar that requested this deletion (which might NOT be the same as
* the actual current owner of the resource).
*/
abstract String requestingClientId();
/** First half of TRID for the original request, split for serializability. */
@Nullable
abstract String clientTransactionId();
/** Second half of TRID for the original request, split for serializability. */
abstract String serverTransactionId();
abstract boolean isSuperuser();
abstract DateTime requestedTime();
abstract boolean isDeletionAllowed();
abstract TaskHandle task();
@AutoValue.Builder
abstract static class Builder {
abstract Builder setKey(Key<? extends EppResource> key);
abstract Builder setLastUpdateTime(DateTime lastUpdateTime);
abstract Builder setRequestingClientId(String requestingClientId);
abstract Builder setClientTransactionId(@Nullable String clientTransactionId);
abstract Builder setServerTransactionId(String serverTransactionId);
abstract Builder setIsSuperuser(boolean isSuperuser);
abstract Builder setRequestedTime(DateTime requestedTime);
abstract Builder setIsDeletionAllowed(boolean isDeletionAllowed);
abstract Builder setTask(TaskHandle task);
abstract DeletionRequest build();
}
static DeletionRequest createFromTask(TaskHandle task, DateTime now)
throws Exception {
ImmutableMap<String, String> params = ImmutableMap.copyOf(task.extractParams());
Key<EppResource> resourceKey =
Key.create(
checkNotNull(params.get(PARAM_RESOURCE_KEY), "Resource to delete not specified"));
EppResource resource =
checkNotNull(ofy().load().key(resourceKey).now(), "Resource to delete doesn't exist");
checkState(
resource instanceof ContactResource || resource instanceof HostResource,
"Cannot delete a %s via this action",
resource.getClass().getSimpleName());
return new AutoValue_DeleteContactsAndHostsAction_DeletionRequest.Builder()
.setKey(resourceKey)
.setLastUpdateTime(resource.getUpdateAutoTimestamp().getTimestamp())
.setRequestingClientId(
checkNotNull(
params.get(PARAM_REQUESTING_CLIENT_ID), "Requesting client id not specified"))
// Note that client transaction ID is optional, in which case this sets it to null.
.setClientTransactionId(params.get(PARAM_CLIENT_TRANSACTION_ID))
.setServerTransactionId(
checkNotNull(
params.get(PARAM_SERVER_TRANSACTION_ID), "Server transaction id not specified"))
.setIsSuperuser(
Boolean.parseBoolean(
checkNotNull(params.get(PARAM_IS_SUPERUSER), "Is superuser not specified")))
.setRequestedTime(
DateTime.parse(
checkNotNull(params.get(PARAM_REQUESTED_TIME), "Requested time not specified")))
.setIsDeletionAllowed(doesResourceStateAllowDeletion(resource, now))
.setTask(task)
.build();
}
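    /* Illustrative sketch of the kind of pull-queue task this parser consumes; the parameter
     * names are the PARAM_* constants used above, and the contact key and values here are
     * hypothetical:
     *
     *   TaskOptions task =
     *       TaskOptions.Builder.withMethod(TaskOptions.Method.PULL)
     *           .param(PARAM_RESOURCE_KEY, contactKey.getString())
     *           .param(PARAM_REQUESTING_CLIENT_ID, "TheRegistrar")
     *           .param(PARAM_SERVER_TRANSACTION_ID, "ABC-123")
     *           .param(PARAM_IS_SUPERUSER, "false")
     *           .param(PARAM_REQUESTED_TIME, now.toString());
     */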
OperationType getMetricOperationType() {
if (key().getKind().equals(KIND_CONTACT)) {
return OperationType.CONTACT_DELETE;
} else if (key().getKind().equals(KIND_HOST)) {
return OperationType.HOST_DELETE;
} else {
throw new IllegalStateException(
String.format(
"Cannot determine async operation type for metric for resource %s", key()));
}
}
}
/** A class that encapsulates the values resulting from attempted contact/host deletion. */
@AutoValue
abstract static class DeletionResult {
enum Type {
DELETED("%s deleted", OperationResult.SUCCESS),
NOT_DELETED("%s not deleted", OperationResult.FAILURE),
ERRORED("%s errored out during deletion", OperationResult.ERROR);
private final String counterFormat;
private final OperationResult operationResult;
Type(String counterFormat, OperationResult operationResult) {
this.counterFormat = counterFormat;
this.operationResult = operationResult;
}
String renderCounterText(String resourceName) {
return String.format(counterFormat, resourceName);
}
}
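    /* Example: Type.DELETED.renderCounterText("contact") yields "contact deleted", which is
     * used as a mapreduce counter name. */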
abstract Type type();
abstract String pollMessageText();
static DeletionResult create(Type type, String pollMessageText) {
return new AutoValue_DeleteContactsAndHostsAction_DeletionResult(type, pollMessageText);
}
OperationResult getMetricOperationResult() {
return type().operationResult;
}
}
static boolean doesResourceStateAllowDeletion(EppResource resource, DateTime now) {
Key<EppResource> key = Key.create(resource);
if (isDeleted(resource, now)) {
logger.atWarning().log("Cannot asynchronously delete %s because it is already deleted", key);
return false;
}
if (!resource.getStatusValues().contains(PENDING_DELETE)) {
logger.atWarning().log(
"Cannot asynchronously delete %s because it is not in PENDING_DELETE", key);
return false;
}
return true;
}
}

View file

@ -0,0 +1,149 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.common.base.Preconditions.checkState;
import static google.registry.config.RegistryEnvironment.PRODUCTION;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.mapreduce.inputs.EppResourceInputs.createEntityInput;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.request.Action.Method.POST;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryEnvironment;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.model.EppResource;
import google.registry.model.contact.ContactResource;
import google.registry.model.host.HostResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import java.util.List;
import javax.inject.Inject;
/**
 * Hard deletes load-test ContactResources, HostResources, their subordinate history entries, and
 * the associated ForeignKeyIndex and EppResourceIndex entities.
 *
 * <p>This only deletes contacts and hosts, NOT domains. To delete domains, use {@link
 * DeleteProberDataAction} and pass it the TLD(s) that the load test domains were created on. Note
 * that DeleteProberDataAction is safe enough to run in production whereas this mapreduce is not,
 * but this one does not need to be runnable in production because load testing isn't run against
 * production.
 */

@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/deleteLoadTestData",
method = POST,
auth = Auth.AUTH_INTERNAL_ONLY)
public class DeleteLoadTestDataAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
/**
* The registrars for which to wipe out all contacts/hosts.
*
   * <p>This is hard-coded because it's too dangerous to specify as a request parameter. Keeping
   * it in code means any change has to go through code review.
*/
private static final ImmutableSet<String> LOAD_TEST_REGISTRARS = ImmutableSet.of("proxy");
@Inject
@Parameter(PARAM_DRY_RUN)
boolean isDryRun;
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject RegistryEnvironment registryEnvironment;
@Inject
DeleteLoadTestDataAction() {}
@Override
public void run() {
// This mapreduce doesn't guarantee that foreign key relations are preserved, so isn't safe to
// run on production. On other environments, data is fully wiped out occasionally anyway, so
// having some broken data that isn't referred to isn't the end of the world.
checkState(
registryEnvironment != PRODUCTION, "This mapreduce is not safe to run on PRODUCTION.");
mrRunner
.setJobName("Delete load test data")
.setModuleName("backend")
.runMapOnly(
new DeleteLoadTestDataMapper(isDryRun),
ImmutableList.of(
createEntityInput(ContactResource.class), createEntityInput(HostResource.class)))
.sendLinkToMapreduceConsole(response);
}
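  /* Illustrative invocation (the hostname and exact query-parameter spelling are assumptions;
   * the dry-run flag corresponds to PARAM_DRY_RUN):
   *
   *   curl -X POST "https://backend-dot-<project>.appspot.com/_dr/task/deleteLoadTestData?dryRun=true"
   *
   * A dry run only logs the keys that would be hard-deleted.
   */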
/** Provides the map method that runs for each existing contact and host entity. */
public static class DeleteLoadTestDataMapper extends Mapper<EppResource, Void, Void> {
private static final long serialVersionUID = -3817710674062432694L;
private final boolean isDryRun;
public DeleteLoadTestDataMapper(boolean isDryRun) {
this.isDryRun = isDryRun;
}
@Override
public final void map(EppResource resource) {
if (LOAD_TEST_REGISTRARS.contains(resource.getPersistedCurrentSponsorClientId())) {
deleteResource(resource);
getContext()
.incrementCounter(
String.format("deleted %s entities", resource.getClass().getSimpleName()));
} else {
getContext().incrementCounter("skipped, not load test data");
}
}
private void deleteResource(EppResource resource) {
final Key<EppResourceIndex> eppIndex =
Key.create(EppResourceIndex.create(Key.create(resource)));
final Key<? extends ForeignKeyIndex<?>> fki = ForeignKeyIndex.createKey(resource);
int numEntitiesDeleted =
ofy()
.transact(
() -> {
// This ancestor query selects all descendant entities.
List<Key<Object>> resourceAndDependentKeys =
ofy().load().ancestor(resource).keys().list();
ImmutableSet<Key<?>> allKeys =
new ImmutableSet.Builder<Key<?>>()
.add(fki)
.add(eppIndex)
.addAll(resourceAndDependentKeys)
.build();
if (isDryRun) {
logger.atInfo().log("Would hard-delete the following entities: %s", allKeys);
} else {
ofy().deleteWithoutBackup().keys(allKeys);
}
return allKeys.size();
});
getContext().incrementCounter("total entities deleted", numEntitiesDeleted);
}
}
}

View file

@ -0,0 +1,272 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.model.ResourceTransferUtils.updateForeignKeyIndexDeletionTime;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.model.registry.Registries.getTldsOfType;
import static google.registry.model.reporting.HistoryEntry.Type.DOMAIN_DELETE;
import static google.registry.request.Action.Method.POST;
import static google.registry.request.RequestParameters.PARAM_TLDS;
import static org.joda.time.DateTimeZone.UTC;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.config.RegistryEnvironment;
import google.registry.dns.DnsQueue;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.EppResourceUtils;
import google.registry.model.domain.DomainBase;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.model.registry.Registry;
import google.registry.model.registry.Registry.TldType;
import google.registry.model.reporting.HistoryEntry;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import java.util.List;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Deletes all prober DomainBases and their subordinate history entries, poll messages, and
* billing events, along with their ForeignKeyDomainIndex and EppResourceIndex entities.
*
* <p>See: https://www.youtube.com/watch?v=xuuv0syoHnM
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/deleteProberData",
method = POST,
auth = Auth.AUTH_INTERNAL_ONLY)
public class DeleteProberDataAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject @Parameter(PARAM_DRY_RUN) boolean isDryRun;
  /** List of TLDs to work on. If empty, works on all TLDs that end with .test. */
@Inject @Parameter(PARAM_TLDS) ImmutableSet<String> tlds;
@Inject @Config("registryAdminClientId") String registryAdminClientId;
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject RegistryEnvironment registryEnvironment;
@Inject DeleteProberDataAction() {}
@Override
public void run() {
checkState(
!Strings.isNullOrEmpty(registryAdminClientId),
"Registry admin client ID must be configured for prober data deletion to work");
mrRunner
.setJobName("Delete prober data")
.setModuleName("backend")
.runMapOnly(
new DeleteProberDataMapper(getProberRoidSuffixes(), isDryRun, registryAdminClientId),
ImmutableList.of(EppResourceInputs.createKeyInput(DomainBase.class)))
.sendLinkToMapreduceConsole(response);
}
private ImmutableSet<String> getProberRoidSuffixes() {
checkArgument(
!RegistryEnvironment.PRODUCTION.equals(registryEnvironment)
|| tlds.stream().allMatch(tld -> tld.endsWith(".test")),
"On production, can only work on TLDs that end with .test");
ImmutableSet<String> deletableTlds =
getTldsOfType(TldType.TEST)
.stream()
.filter(tld -> tlds.isEmpty() ? tld.endsWith(".test") : tlds.contains(tld))
.collect(toImmutableSet());
checkArgument(
tlds.isEmpty() || deletableTlds.equals(tlds),
"If tlds are given, they must all exist and be TEST tlds. Given: %s, not found: %s",
tlds,
Sets.difference(tlds, deletableTlds));
return deletableTlds
.stream()
.map(tld -> Registry.get(tld).getRoidSuffix())
.collect(toImmutableSet());
}
/** Provides the map method that runs for each existing DomainBase entity. */
public static class DeleteProberDataMapper extends Mapper<Key<DomainBase>, Void, Void> {
private static final DnsQueue dnsQueue = DnsQueue.create();
private static final long serialVersionUID = -7724537393697576369L;
    /**
     * The maximum amount of time we allow a prober domain to be in use.
     *
     * <p>In practice, the prober's connection will time out well before this duration elapses;
     * the duration includes a decent buffer.
     */
private static final Duration DOMAIN_USED_DURATION = Duration.standardHours(1);
    /**
     * The minimum amount of time we want a domain to be "soft deleted".
     *
     * <p>The domain has to remain soft deleted for at least enough time for the DNS task to run
     * and remove it from DNS itself. This is probably on the order of minutes.
     */
private static final Duration SOFT_DELETE_DELAY = Duration.standardHours(1);
private final ImmutableSet<String> proberRoidSuffixes;
private final Boolean isDryRun;
private final String registryAdminClientId;
public DeleteProberDataMapper(
ImmutableSet<String> proberRoidSuffixes, Boolean isDryRun, String registryAdminClientId) {
this.proberRoidSuffixes = proberRoidSuffixes;
this.isDryRun = isDryRun;
this.registryAdminClientId = registryAdminClientId;
}
@Override
public final void map(Key<DomainBase> key) {
try {
String roidSuffix = Iterables.getLast(Splitter.on('-').split(key.getName()));
if (proberRoidSuffixes.contains(roidSuffix)) {
deleteDomain(key);
} else {
getContext().incrementCounter("skipped, non-prober data");
}
} catch (Throwable t) {
logger.atSevere().withCause(t).log("Error while deleting prober data for key %s", key);
getContext().incrementCounter(String.format("error, kind %s", key.getKind()));
}
}
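    /* Worked example (the repo id is made up): a prober domain with key name "1A2B3C4D-IQTEST"
     * splits on '-' into ["1A2B3C4D", "IQTEST"], so its roid suffix is "IQTEST" and it is only
     * deleted if "IQTEST" is one of the prober roid suffixes computed above. */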
private void deleteDomain(final Key<DomainBase> domainKey) {
final DomainBase domain = ofy().load().key(domainKey).now();
DateTime now = DateTime.now(UTC);
if (domain == null) {
// Depending on how stale Datastore indexes are, we can get keys to resources that are
// already deleted (e.g. by a recent previous invocation of this mapreduce). So ignore them.
getContext().incrementCounter("already deleted");
return;
}
String domainName = domain.getFullyQualifiedDomainName();
if (domainName.equals("nic." + domain.getTld())) {
getContext().incrementCounter("skipped, NIC domain");
return;
}
if (now.isBefore(domain.getCreationTime().plus(DOMAIN_USED_DURATION))) {
getContext().incrementCounter("skipped, domain too new");
return;
}
if (!domain.getSubordinateHosts().isEmpty()) {
logger.atWarning().log(
"Cannot delete domain %s (%s) because it has subordinate hosts.",
domainName, domainKey);
getContext().incrementCounter("skipped, had subordinate host(s)");
return;
}
// If the domain is still active, that means that the prober encountered a failure and did not
// successfully soft-delete the domain (thus leaving its DNS entry published). We soft-delete
// it now so that the DNS entry can be handled. The domain will then be hard-deleted the next
// time the mapreduce is run.
if (EppResourceUtils.isActive(domain, now)) {
if (isDryRun) {
logger.atInfo().log(
"Would soft-delete the active domain: %s (%s)", domainName, domainKey);
} else {
softDeleteDomain(domain);
}
getContext().incrementCounter("domains soft-deleted");
return;
}
      // If the domain isn't active, we want to make sure it hasn't been active for "a while"
      // before deleting it. This prevents an accidental double-map with the same key from
      // immediately hard-deleting a domain that was soft-deleted just above.
if (now.isBefore(domain.getDeletionTime().plus(SOFT_DELETE_DELAY))) {
getContext().incrementCounter("skipped, domain too recently soft deleted");
return;
}
final Key<EppResourceIndex> eppIndex = Key.create(EppResourceIndex.create(domainKey));
final Key<? extends ForeignKeyIndex<?>> fki = ForeignKeyIndex.createKey(domain);
int entitiesDeleted =
ofy()
.transact(
() -> {
                  // This ancestor query selects all descendant HistoryEntries, BillingEvents,
                  // PollMessages, and TLD-specific entities, as well as the domain itself.
List<Key<Object>> domainAndDependentKeys =
ofy().load().ancestor(domainKey).keys().list();
ImmutableSet<Key<?>> allKeys =
new ImmutableSet.Builder<Key<?>>()
.add(fki)
.add(eppIndex)
.addAll(domainAndDependentKeys)
.build();
if (isDryRun) {
logger.atInfo().log("Would hard-delete the following entities: %s", allKeys);
} else {
ofy().deleteWithoutBackup().keys(allKeys);
}
return allKeys.size();
});
getContext().incrementCounter("domains hard-deleted");
getContext().incrementCounter("total entities hard-deleted", entitiesDeleted);
}
private void softDeleteDomain(final DomainBase domain) {
ofy().transactNew(() -> {
DomainBase deletedDomain = domain
.asBuilder()
.setDeletionTime(ofy().getTransactionTime())
.setStatusValues(null)
.build();
HistoryEntry historyEntry = new HistoryEntry.Builder()
.setParent(domain)
.setType(DOMAIN_DELETE)
.setModificationTime(ofy().getTransactionTime())
.setBySuperuser(true)
.setReason("Deletion of prober data")
.setClientId(registryAdminClientId)
.build();
// Note that we don't bother handling grace periods, billing events, pending transfers,
// poll messages, or auto-renews because these will all be hard-deleted the next time the
// mapreduce runs anyway.
ofy().save().entities(deletedDomain, historyEntry);
updateForeignKeyIndexDeletionTime(deletedDomain);
dnsQueue.addDomainRefreshTask(deletedDomain.getFullyQualifiedDomainName());
}
);
}
}
}

View file

@ -0,0 +1,329 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Sets.difference;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.mapreduce.inputs.EppResourceInputs.createChildEntityInput;
import static google.registry.model.common.Cursor.CursorType.RECURRING_BILLING;
import static google.registry.model.domain.Period.Unit.YEARS;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.model.reporting.HistoryEntry.Type.DOMAIN_AUTORENEW;
import static google.registry.pricing.PricingEngineProxy.getDomainRenewCost;
import static google.registry.util.CollectionUtils.union;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.earliestOf;
import static google.registry.util.DomainNameUtils.getTldFromDomainName;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.NullInput;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.billing.BillingEvent;
import google.registry.model.billing.BillingEvent.Flag;
import google.registry.model.billing.BillingEvent.OneTime;
import google.registry.model.billing.BillingEvent.Recurring;
import google.registry.model.common.Cursor;
import google.registry.model.domain.DomainBase;
import google.registry.model.domain.Period;
import google.registry.model.registry.Registry;
import google.registry.model.reporting.DomainTransactionRecord;
import google.registry.model.reporting.DomainTransactionRecord.TransactionReportField;
import google.registry.model.reporting.HistoryEntry;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import java.util.Optional;
import java.util.Set;
import javax.inject.Inject;
import org.joda.money.Money;
import org.joda.time.DateTime;
/**
* A mapreduce that expands {@link Recurring} billing events into synthetic {@link OneTime} events.
*
* <p>The cursor used throughout this mapreduce (overridden if necessary using the parameter {@code
* cursorTime}) represents the inclusive lower bound on the range of billing times that will be
* expanded as a result of the job (the exclusive upper bound being the execution time of the job).
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/expandRecurringBillingEvents",
auth = Auth.AUTH_INTERNAL_ONLY)
public class ExpandRecurringBillingEventsAction implements Runnable {
public static final String PARAM_CURSOR_TIME = "cursorTime";
private static final String ERROR_COUNTER = "errors";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject Clock clock;
@Inject MapreduceRunner mrRunner;
@Inject @Parameter(PARAM_DRY_RUN) boolean isDryRun;
@Inject @Parameter(PARAM_CURSOR_TIME) Optional<DateTime> cursorTimeParam;
@Inject Response response;
@Inject ExpandRecurringBillingEventsAction() {}
@Override
public void run() {
Cursor cursor = ofy().load().key(Cursor.createGlobalKey(RECURRING_BILLING)).now();
DateTime executeTime = clock.nowUtc();
DateTime persistedCursorTime = (cursor == null ? START_OF_TIME : cursor.getCursorTime());
DateTime cursorTime = cursorTimeParam.orElse(persistedCursorTime);
checkArgument(
cursorTime.isBefore(executeTime),
"Cursor time must be earlier than execution time.");
logger.atInfo().log(
"Running Recurring billing event expansion for billing time range [%s, %s).",
cursorTime, executeTime);
mrRunner
.setJobName("Expand Recurring billing events into synthetic OneTime events.")
.setModuleName("backend")
.runMapreduce(
new ExpandRecurringBillingEventsMapper(isDryRun, cursorTime, clock.nowUtc()),
new ExpandRecurringBillingEventsReducer(isDryRun, persistedCursorTime),
// Add an extra shard that maps over a null recurring event (see the mapper for why).
ImmutableList.of(
new NullInput<>(),
createChildEntityInput(
ImmutableSet.of(DomainBase.class), ImmutableSet.of(Recurring.class))))
.sendLinkToMapreduceConsole(response);
}
/** Mapper to expand {@link Recurring} billing events into synthetic {@link OneTime} events. */
public static class ExpandRecurringBillingEventsMapper
extends Mapper<Recurring, DateTime, DateTime> {
private static final long serialVersionUID = 8376442755556228455L;
private final boolean isDryRun;
private final DateTime cursorTime;
private final DateTime executeTime;
public ExpandRecurringBillingEventsMapper(
boolean isDryRun, DateTime cursorTime, DateTime executeTime) {
this.isDryRun = isDryRun;
this.cursorTime = cursorTime;
this.executeTime = executeTime;
}
@Override
public final void map(final Recurring recurring) {
// This single emit forces the reducer to run at the end of the map job, so that a mapper
// that runs without error will advance the cursor at the end of processing (unless this was
// a dry run, in which case the cursor should not be advanced).
if (recurring == null) {
emit(cursorTime, executeTime);
return;
}
getContext().incrementCounter("Recurring billing events encountered");
// Ignore any recurring billing events that have yet to apply.
if (recurring.getEventTime().isAfter(executeTime)
// This second case occurs when a domain is transferred or deleted before first renewal.
|| recurring.getRecurrenceEndTime().isBefore(recurring.getEventTime())) {
getContext().incrementCounter("Recurring billing events ignored");
return;
}
int numBillingEventsSaved = 0;
try {
numBillingEventsSaved = ofy().transactNew(() -> {
ImmutableSet.Builder<OneTime> syntheticOneTimesBuilder =
new ImmutableSet.Builder<>();
final Registry tld = Registry.get(getTldFromDomainName(recurring.getTargetId()));
// Determine the complete set of times at which this recurring event should occur
// (up to and including the runtime of the mapreduce).
Iterable<DateTime> eventTimes =
recurring.getRecurrenceTimeOfYear().getInstancesInRange(Range.closed(
recurring.getEventTime(),
earliestOf(recurring.getRecurrenceEndTime(), executeTime)));
// Convert these event times to billing times
final ImmutableSet<DateTime> billingTimes =
getBillingTimesInScope(eventTimes, cursorTime, executeTime, tld);
Key<? extends EppResource> domainKey = recurring.getParentKey().getParent();
Iterable<OneTime> oneTimesForDomain =
ofy().load().type(OneTime.class).ancestor(domainKey);
// Determine the billing times that already have OneTime events persisted.
ImmutableSet<DateTime> existingBillingTimes =
getExistingBillingTimes(oneTimesForDomain, recurring);
ImmutableSet.Builder<HistoryEntry> historyEntriesBuilder =
new ImmutableSet.Builder<>();
// Create synthetic OneTime events for all billing times that do not yet have an event
// persisted.
for (DateTime billingTime : difference(billingTimes, existingBillingTimes)) {
// Construct a new HistoryEntry that parents over the OneTime
HistoryEntry historyEntry = new HistoryEntry.Builder()
.setBySuperuser(false)
.setClientId(recurring.getClientId())
.setModificationTime(ofy().getTransactionTime())
.setParent(domainKey)
.setPeriod(Period.create(1, YEARS))
.setReason("Domain autorenewal by ExpandRecurringBillingEventsAction")
.setRequestedByRegistrar(false)
.setType(DOMAIN_AUTORENEW)
.setDomainTransactionRecords(
ImmutableSet.of(
DomainTransactionRecord.create(
tld.getTldStr(),
// We report this when the autorenew grace period ends
billingTime,
TransactionReportField.netRenewsFieldFromYears(1),
1)))
.build();
historyEntriesBuilder.add(historyEntry);
DateTime eventTime = billingTime.minus(tld.getAutoRenewGracePeriodLength());
// Determine the cost for a one-year renewal.
Money renewCost = getDomainRenewCost(recurring.getTargetId(), eventTime, 1);
syntheticOneTimesBuilder.add(new OneTime.Builder()
.setBillingTime(billingTime)
.setClientId(recurring.getClientId())
.setCost(renewCost)
.setEventTime(eventTime)
.setFlags(union(recurring.getFlags(), Flag.SYNTHETIC))
.setParent(historyEntry)
.setPeriodYears(1)
.setReason(recurring.getReason())
.setSyntheticCreationTime(executeTime)
.setCancellationMatchingBillingEvent(Key.create(recurring))
.setTargetId(recurring.getTargetId())
.build());
}
Set<HistoryEntry> historyEntries = historyEntriesBuilder.build();
Set<OneTime> syntheticOneTimes = syntheticOneTimesBuilder.build();
if (!isDryRun) {
ImmutableSet<ImmutableObject> entitiesToSave =
new ImmutableSet.Builder<ImmutableObject>()
.addAll(historyEntries)
.addAll(syntheticOneTimes)
.build();
ofy().save().entities(entitiesToSave).now();
}
return syntheticOneTimes.size();
});
} catch (Throwable t) {
getContext().incrementCounter("error: " + t.getClass().getSimpleName());
getContext().incrementCounter(ERROR_COUNTER);
throw new RuntimeException(
String.format(
"Error while expanding Recurring billing events for %d", recurring.getId()),
t);
}
if (!isDryRun) {
getContext().incrementCounter("Saved OneTime billing events", numBillingEventsSaved);
} else {
getContext().incrementCounter(
"Generated OneTime billing events (dry run)", numBillingEventsSaved);
}
}
/**
* Filters a set of {@link DateTime}s down to event times that are in scope for a particular
* mapreduce run, given the cursor time and the mapreduce execution time.
*/
private ImmutableSet<DateTime> getBillingTimesInScope(
Iterable<DateTime> eventTimes,
DateTime cursorTime,
DateTime executeTime,
final Registry tld) {
return Streams.stream(eventTimes)
.map(eventTime -> eventTime.plus(tld.getAutoRenewGracePeriodLength()))
.filter(Range.closedOpen(cursorTime, executeTime))
.collect(toImmutableSet());
}
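    /* Worked example (dates and grace period are illustrative): with a 45-day autorenew grace
     * period, an event time of 2017-01-01T00:00Z maps to a billing time of 2017-02-15T00:00Z,
     * which is in scope iff cursorTime <= 2017-02-15T00:00Z < executeTime. */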
/**
* Determines an {@link ImmutableSet} of {@link DateTime}s that have already been persisted
* for a given recurring billing event.
*/
private ImmutableSet<DateTime> getExistingBillingTimes(
Iterable<BillingEvent.OneTime> oneTimesForDomain,
final BillingEvent.Recurring recurringEvent) {
return Streams.stream(oneTimesForDomain)
.filter(
billingEvent ->
Key.create(recurringEvent)
.equals(billingEvent.getCancellationMatchingBillingEvent()))
.map(OneTime::getBillingTime)
.collect(toImmutableSet());
}
}
/**
* "Reducer" to advance the cursor after all map jobs have been completed. The NullInput into the
* mapper will cause the mapper to emit one timestamp pair (current cursor and execution time),
* and the cursor will be advanced (and the timestamps logged) at the end of a successful
* mapreduce.
*/
public static class ExpandRecurringBillingEventsReducer
extends Reducer<DateTime, DateTime, Void> {
private final boolean isDryRun;
private final DateTime expectedPersistedCursorTime;
public ExpandRecurringBillingEventsReducer(
boolean isDryRun, DateTime expectedPersistedCursorTime) {
this.isDryRun = isDryRun;
this.expectedPersistedCursorTime = expectedPersistedCursorTime;
}
@Override
public void reduce(final DateTime cursorTime, final ReducerInput<DateTime> executionTimeInput) {
if (getContext().getCounter(ERROR_COUNTER).getValue() > 0) {
logger.atSevere().log(
"One or more errors logged during recurring event expansion. Cursor will"
+ " not be advanced.");
return;
}
final DateTime executionTime = executionTimeInput.next();
logger.atInfo().log(
"Recurring event expansion %s complete for billing event range [%s, %s).",
isDryRun ? "(dry run) " : "", cursorTime, executionTime);
ofy()
.transact(
() -> {
Cursor cursor = ofy().load().key(Cursor.createGlobalKey(RECURRING_BILLING)).now();
DateTime currentCursorTime =
(cursor == null ? START_OF_TIME : cursor.getCursorTime());
if (!currentCursorTime.equals(expectedPersistedCursorTime)) {
logger.atSevere().log(
"Current cursor position %s does not match expected cursor position %s.",
currentCursorTime, expectedPersistedCursorTime);
return;
}
if (!isDryRun) {
ofy().save().entity(Cursor.createGlobal(RECURRING_BILLING, executionTime));
}
});
}
}
}

View file

@ -0,0 +1,336 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static com.google.appengine.api.taskqueue.QueueConstants.maxLeaseCount;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_HOST_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_REQUESTED_TIME;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_HOST_RENAME;
import static google.registry.batch.AsyncTaskMetrics.OperationType.DNS_REFRESH;
import static google.registry.mapreduce.inputs.EppResourceInputs.createEntityInput;
import static google.registry.model.EppResourceUtils.isActive;
import static google.registry.model.EppResourceUtils.isDeleted;
import static google.registry.model.ofy.ObjectifyService.ofy;
import static google.registry.util.DateTimeUtils.latestOf;
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.logging.Level.INFO;
import static java.util.logging.Level.SEVERE;
import static org.joda.time.Duration.standardHours;
import com.google.appengine.api.taskqueue.LeaseOptions;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.batch.AsyncTaskMetrics.OperationResult;
import google.registry.dns.DnsQueue;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.NullInput;
import google.registry.model.domain.DomainBase;
import google.registry.model.host.HostResource;
import google.registry.model.server.Lock;
import google.registry.request.Action;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import google.registry.util.NonFinalForTesting;
import google.registry.util.RequestStatusChecker;
import google.registry.util.Retrier;
import google.registry.util.SystemClock;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/** Performs batched DNS refreshes for applicable domains following a host rename. */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/refreshDnsOnHostRename",
auth = Auth.AUTH_INTERNAL_ONLY)
public class RefreshDnsOnHostRenameAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final Duration LEASE_LENGTH = standardHours(4);
@Inject AsyncTaskMetrics asyncTaskMetrics;
@Inject Clock clock;
@Inject MapreduceRunner mrRunner;
@Inject @Named(QUEUE_ASYNC_HOST_RENAME) Queue pullQueue;
@Inject RequestStatusChecker requestStatusChecker;
@Inject Response response;
@Inject Retrier retrier;
@Inject RefreshDnsOnHostRenameAction() {}
@Override
public void run() {
// Check if the lock can be acquired, and if not, a previous run of this mapreduce is still
// executing, so return early.
Optional<Lock> lock =
Lock.acquire(
RefreshDnsOnHostRenameAction.class.getSimpleName(),
null,
LEASE_LENGTH,
requestStatusChecker,
false);
if (!lock.isPresent()) {
logRespondAndUnlock(INFO, "Can't acquire lock; aborting.", lock);
return;
}
// Lease the async tasks to process.
LeaseOptions options =
LeaseOptions.Builder.withCountLimit(maxLeaseCount())
.leasePeriod(LEASE_LENGTH.getStandardSeconds(), SECONDS);
List<TaskHandle> tasks = pullQueue.leaseTasks(options);
asyncTaskMetrics.recordDnsRefreshBatchSize(tasks.size());
// Check if there are no tasks to process, and if so, return early.
if (tasks.isEmpty()) {
logRespondAndUnlock(
INFO, "No DNS refresh on host rename tasks to process in pull queue; finishing.", lock);
return;
}
ImmutableList.Builder<DnsRefreshRequest> requestsBuilder = new ImmutableList.Builder<>();
ImmutableList.Builder<Key<HostResource>> hostKeys = new ImmutableList.Builder<>();
final List<DnsRefreshRequest> requestsToDelete = new ArrayList<>();
for (TaskHandle task : tasks) {
try {
DnsRefreshRequest request = DnsRefreshRequest.createFromTask(task, clock.nowUtc());
if (request.isRefreshNeeded()) {
requestsBuilder.add(request);
hostKeys.add(request.hostKey());
} else {
// Skip hosts that are deleted.
requestsToDelete.add(request);
}
} catch (Exception e) {
logger.atSevere().withCause(e).log(
"Could not parse DNS refresh for host request, delaying task for a day: %s", task);
// Grab the lease for a whole day, so it won't continue throwing errors every five minutes.
pullQueue.modifyTaskLease(task, 1L, DAYS);
}
}
deleteTasksWithRetry(
requestsToDelete, pullQueue, asyncTaskMetrics, retrier, OperationResult.STALE);
ImmutableList<DnsRefreshRequest> refreshRequests = requestsBuilder.build();
if (refreshRequests.isEmpty()) {
logRespondAndUnlock(
INFO, "No async DNS refreshes to process because all renamed hosts are deleted.", lock);
} else {
logger.atInfo().log(
"Processing asynchronous DNS refresh for renamed hosts: %s", hostKeys.build());
runMapreduce(refreshRequests, lock);
}
}
private void runMapreduce(ImmutableList<DnsRefreshRequest> refreshRequests, Optional<Lock> lock) {
try {
mrRunner
.setJobName("Enqueue DNS refreshes for domains referencing renamed hosts")
.setModuleName("backend")
.setDefaultReduceShards(1)
.runMapreduce(
new RefreshDnsOnHostRenameMapper(refreshRequests, retrier),
new RefreshDnsOnHostRenameReducer(refreshRequests, lock.get(), retrier),
// Add an extra NullInput so that the reducer always fires exactly once.
ImmutableList.of(new NullInput<>(), createEntityInput(DomainBase.class)))
.sendLinkToMapreduceConsole(response);
} catch (Throwable t) {
logRespondAndUnlock(
SEVERE, "Error starting mapreduce to refresh DNS for renamed hosts.", lock);
}
}
private void logRespondAndUnlock(Level level, String message, Optional<Lock> lock) {
logger.at(level).log(message);
response.setPayload(message);
lock.ifPresent(Lock::release);
}
/** Map over domains and refresh the DNS of those that reference the renamed hosts. */
public static class RefreshDnsOnHostRenameMapper
extends Mapper<DomainBase, Boolean, Boolean> {
private static final long serialVersionUID = -5261698524424335531L;
private static final DnsQueue dnsQueue = DnsQueue.create();
private final ImmutableList<DnsRefreshRequest> refreshRequests;
private final Retrier retrier;
RefreshDnsOnHostRenameMapper(
ImmutableList<DnsRefreshRequest> refreshRequests, Retrier retrier) {
this.refreshRequests = refreshRequests;
this.retrier = retrier;
}
@Override
public final void map(@Nullable final DomainBase domain) {
if (domain == null) {
// Emit a single value so that the reducer always runs. The key and value don't matter.
emit(true, true);
return;
}
Key<HostResource> referencingHostKey = null;
for (DnsRefreshRequest request : refreshRequests) {
if (isActive(domain, request.lastUpdateTime())
&& domain.getNameservers().contains(request.hostKey())) {
referencingHostKey = request.hostKey();
break;
}
}
if (referencingHostKey != null) {
retrier.callWithRetry(
() -> dnsQueue.addDomainRefreshTask(domain.getFullyQualifiedDomainName()),
TransientFailureException.class);
logger.atInfo().log(
"Enqueued DNS refresh for domain %s referenced by host %s.",
domain.getFullyQualifiedDomainName(), referencingHostKey);
getContext().incrementCounter("domains refreshed");
} else {
getContext().incrementCounter("domains not refreshed");
}
// Don't catch errors -- we allow the mapreduce to terminate on any errors that can't be
// resolved by retrying the transaction. The reducer only fires if the mapper completes
// without errors, meaning that it is acceptable to delete all tasks.
}
}
/**
* A reducer that always fires exactly once.
*
   * <p>This is a reducer in name only; what it really does is wait for all of the mapper tasks
   * to finish, and then delete the pull queue tasks. Note that this only happens if the mapper
   * completes execution without errors.
*/
public static class RefreshDnsOnHostRenameReducer extends Reducer<Boolean, Boolean, Void> {
private static final long serialVersionUID = 9077366205249562118L;
@NonFinalForTesting
private static AsyncTaskMetrics asyncTaskMetrics = new AsyncTaskMetrics(new SystemClock());
private final Lock lock;
private final Retrier retrier;
private final List<DnsRefreshRequest> refreshRequests;
RefreshDnsOnHostRenameReducer(
List<DnsRefreshRequest> refreshRequests, Lock lock, Retrier retrier) {
this.refreshRequests = refreshRequests;
this.lock = lock;
this.retrier = retrier;
}
@Override
public void reduce(Boolean key, ReducerInput<Boolean> values) {
// The reduce() method is run precisely once, because the NullInput caused the mapper to emit
// a dummy value once.
deleteTasksWithRetry(
refreshRequests,
getQueue(QUEUE_ASYNC_HOST_RENAME),
asyncTaskMetrics,
retrier,
OperationResult.SUCCESS);
lock.release();
}
}
/** Deletes a list of tasks from the given queue using a retrier. */
private static void deleteTasksWithRetry(
final List<DnsRefreshRequest> refreshRequests,
final Queue queue,
AsyncTaskMetrics asyncTaskMetrics,
Retrier retrier,
OperationResult result) {
if (refreshRequests.isEmpty()) {
return;
}
final List<TaskHandle> tasks =
refreshRequests.stream().map(DnsRefreshRequest::task).collect(toImmutableList());
retrier.callWithRetry(() -> queue.deleteTask(tasks), TransientFailureException.class);
refreshRequests.forEach(
r -> asyncTaskMetrics.recordAsyncFlowResult(DNS_REFRESH, result, r.requestedTime()));
}
/** A class that encapsulates the values of a request to refresh DNS for a renamed host. */
@AutoValue
abstract static class DnsRefreshRequest implements Serializable {
private static final long serialVersionUID = 1772812852271288622L;
abstract Key<HostResource> hostKey();
abstract DateTime lastUpdateTime();
abstract DateTime requestedTime();
abstract boolean isRefreshNeeded();
abstract TaskHandle task();
@AutoValue.Builder
abstract static class Builder {
abstract Builder setHostKey(Key<HostResource> hostKey);
abstract Builder setLastUpdateTime(DateTime lastUpdateTime);
abstract Builder setRequestedTime(DateTime requestedTime);
abstract Builder setIsRefreshNeeded(boolean isRefreshNeeded);
abstract Builder setTask(TaskHandle task);
abstract DnsRefreshRequest build();
}
    /** Returns a packaged-up {@link DnsRefreshRequest} parsed from a task queue task. */
static DnsRefreshRequest createFromTask(TaskHandle task, DateTime now) throws Exception {
ImmutableMap<String, String> params = ImmutableMap.copyOf(task.extractParams());
Key<HostResource> hostKey =
Key.create(checkNotNull(params.get(PARAM_HOST_KEY), "Host to refresh not specified"));
HostResource host =
checkNotNull(ofy().load().key(hostKey).now(), "Host to refresh doesn't exist");
boolean isHostDeleted =
isDeleted(host, latestOf(now, host.getUpdateAutoTimestamp().getTimestamp()));
if (isHostDeleted) {
logger.atInfo().log("Host %s is already deleted, not refreshing DNS.", hostKey);
}
return new AutoValue_RefreshDnsOnHostRenameAction_DnsRefreshRequest.Builder()
.setHostKey(hostKey)
.setLastUpdateTime(host.getUpdateAutoTimestamp().getTimestamp())
.setRequestedTime(
DateTime.parse(
checkNotNull(params.get(PARAM_REQUESTED_TIME), "Requested time not specified")))
.setIsRefreshNeeded(!isHostDeleted)
.setTask(task)
.build();
}
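    /* Illustrative sketch of the pull-queue task this parser consumes (the host key is
     * hypothetical, and the enqueue site is assumed to live in AsyncTaskEnqueuer):
     *
     *   TaskOptions task =
     *       TaskOptions.Builder.withMethod(TaskOptions.Method.PULL)
     *           .param(PARAM_HOST_KEY, hostKey.getString())
     *           .param(PARAM_REQUESTED_TIME, now.toString());
     */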
}
}

View file

@ -0,0 +1,87 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static google.registry.model.ofy.ObjectifyService.ofy;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.collect.ImmutableList;
import com.googlecode.objectify.Key;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.EppResource;
import google.registry.request.Action;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import javax.inject.Inject;
/**
* A mapreduce that re-saves all EppResources, projecting them forward to the current time.
*
* <p>This is useful for completing data migrations on EppResource fields that are accomplished
* with @OnSave or @OnLoad annotations, and also guarantees that all EppResources will get fresh
* commit logs (for backup purposes). Additionally, pending actions such as transfers or grace
* periods that are past their effective time will be resolved.
*
 * <p>Because this action's auth setting is {@code AUTH_INTERNAL_OR_ADMIN}, it can only be run
 * internally (e.g. via a task queue, which sets the X-AppEngine-QueueName header) or by admin
 * users.
 */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/resaveAllEppResources",
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
public class ResaveAllEppResourcesAction implements Runnable {
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject ResaveAllEppResourcesAction() {}
@Override
public void run() {
mrRunner
.setJobName("Re-save all EPP resources")
.setModuleName("backend")
.runMapOnly(
new ResaveAllEppResourcesActionMapper(),
ImmutableList.of(EppResourceInputs.createKeyInput(EppResource.class)))
.sendLinkToMapreduceConsole(response);
}
/** Mapper to re-save all EPP resources. */
public static class ResaveAllEppResourcesActionMapper
extends Mapper<Key<EppResource>, Void, Void> {
private static final long serialVersionUID = -7721628665138087001L;
public ResaveAllEppResourcesActionMapper() {}
@Override
public final void map(final Key<EppResource> resourceKey) {
ofy()
.transact(
() -> {
EppResource projectedResource =
ofy()
.load()
.key(resourceKey)
.now()
.cloneProjectedAtTime(ofy().getTransactionTime());
ofy().save().entity(projectedResource).now();
});
getContext().incrementCounter(String.format("%s entities re-saved", resourceKey.getKind()));
}
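    /* Note: cloneProjectedAtTime resolves any time-dependent pending state (e.g. an expired
     * pending transfer or a lapsed grace period) as of the given time, so saving the projected
     * copy persists those resolutions and gives the resource a fresh commit log. */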
}
}

View file

@ -0,0 +1,88 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_REQUESTED_TIME;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESAVE_TIMES;
import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.model.ofy.ObjectifyService.ofy;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.request.Action;
import google.registry.request.Action.Method;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import javax.inject.Inject;
import org.joda.time.DateTime;
/**
* An action that re-saves a given entity, typically after a certain amount of time has passed.
*
* <p>{@link EppResource}s will be projected forward to the current time.
*/
@Action(
service = Action.Service.BACKEND,
path = ResaveEntityAction.PATH,
auth = Auth.AUTH_INTERNAL_OR_ADMIN,
method = Method.POST)
public class ResaveEntityAction implements Runnable {
public static final String PATH = "/_dr/task/resaveEntity";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Key<ImmutableObject> resourceKey;
private final DateTime requestedTime;
private final ImmutableSortedSet<DateTime> resaveTimes;
private final AsyncTaskEnqueuer asyncTaskEnqueuer;
private final Response response;
@Inject
ResaveEntityAction(
@Parameter(PARAM_RESOURCE_KEY) Key<ImmutableObject> resourceKey,
@Parameter(PARAM_REQUESTED_TIME) DateTime requestedTime,
@Parameter(PARAM_RESAVE_TIMES) ImmutableSet<DateTime> resaveTimes,
AsyncTaskEnqueuer asyncTaskEnqueuer,
Response response) {
this.resourceKey = resourceKey;
this.requestedTime = requestedTime;
this.resaveTimes = ImmutableSortedSet.copyOf(resaveTimes);
this.asyncTaskEnqueuer = asyncTaskEnqueuer;
this.response = response;
}
@Override
public void run() {
logger.atInfo().log(
"Re-saving entity %s which was enqueued at %s.", resourceKey, requestedTime);
ofy().transact(() -> {
ImmutableObject entity = ofy().load().key(resourceKey).now();
ofy().save().entity(
(entity instanceof EppResource)
? ((EppResource) entity).cloneProjectedAtTime(ofy().getTransactionTime()) : entity
);
if (!resaveTimes.isEmpty()) {
asyncTaskEnqueuer.enqueueAsyncResave(entity, requestedTime, resaveTimes);
}
});
response.setPayload("Entity re-saved.");
}
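  /* Illustrative flow (times and call shape are assumptions): a flow wanting an entity
   * re-saved 35 days out enqueues something like
   * asyncTaskEnqueuer.enqueueAsyncResave(entity, now, now.plusDays(35)), which targets PATH
   * with the PARAM_* values injected above; any remaining resaveTimes set is handed back to
   * the enqueuer here after each run. */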
}

View file

@ -0,0 +1,21 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "beam",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/util",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@org_apache_avro",
"@org_apache_beam_runners_direct_java",
"@org_apache_beam_runners_google_cloud_dataflow_java",
"@org_apache_beam_sdks_java_core",
"@org_apache_beam_sdks_java_io_google_cloud_platform",
],
)

View file

@ -0,0 +1,64 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Resources;
import google.registry.util.ResourceUtils;
import org.apache.avro.generic.GenericRecord;
import org.apache.beam.sdk.io.gcp.bigquery.SchemaAndRecord;
/** Static utilities for {@code Beam} pipelines. */
public class BeamUtils {
/** Extracts a string representation of a field in a {@link GenericRecord}. */
public static String extractField(GenericRecord record, String fieldName) {
return String.valueOf(record.get(fieldName));
}
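  // Note: String.valueOf turns a null (or missing) field into the literal string "null" rather
  // than throwing, e.g. extractField(record, "noSuchField") returns "null"; use
  // checkFieldsNotNull first when absent fields must fail fast.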
/**
* Checks that no expected fields in the record are missing.
*
* <p>Note that this simply makes sure the field is not null; it may still generate a parse error
* when interpreting the string representation of an object.
*
* @throws IllegalStateException if the record returns null for any field in {@code fieldNames}
*/
public static void checkFieldsNotNull(
ImmutableList<String> fieldNames, SchemaAndRecord schemaAndRecord) {
GenericRecord record = schemaAndRecord.getRecord();
ImmutableList<String> nullFields =
fieldNames
.stream()
.filter(fieldName -> record.get(fieldName) == null)
.collect(ImmutableList.toImmutableList());
String missingFieldList = Joiner.on(", ").join(nullFields);
if (!nullFields.isEmpty()) {
throw new IllegalStateException(
String.format(
"Read unexpected null value for field(s) %s for record %s",
missingFieldList, record));
}
}
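  /* Usage sketch (the field list is hypothetical): a parser calls
   * checkFieldsNotNull(ImmutableList.of("id", "tld"), schemaAndRecord) before reading fields,
   * so a null "tld" fails fast with a message naming the missing field. */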
/**
* Returns the {@link String} contents for a file in the {@code sql/} directory relative to a
* class.
*/
public static String getQueryFromFile(Class<?> clazz, String filename) {
return ResourceUtils.readResourceUtf8(Resources.getResource(clazz, "sql/" + filename));
}
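  /* Usage sketch (class and file name are assumptions): for a pipeline class in package
   * google.registry.beam.invoicing, getQueryFromFile(InvoicingPipeline.class, "billing_events.sql")
   * resolves the classpath resource google/registry/beam/invoicing/sql/billing_events.sql. */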
}

View file

@ -0,0 +1,31 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "invoicing",
srcs = glob(["*.java"]),
resources = glob(["sql/*"]),
deps = [
"//java/google/registry/beam",
"//java/google/registry/config",
"//java/google/registry/model",
"//java/google/registry/reporting/billing",
"//java/google/registry/util",
"@com_google_apis_google_api_services_bigquery",
"@com_google_auth_library_oauth2_http",
"@com_google_auto_value",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@javax_inject",
"@org_apache_avro",
"@org_apache_beam_runners_direct_java",
"@org_apache_beam_runners_google_cloud_dataflow_java",
"@org_apache_beam_sdks_java_core",
"@org_apache_beam_sdks_java_io_google_cloud_platform",
],
)

View file

@ -0,0 +1,365 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.invoicing;
import static google.registry.beam.BeamUtils.checkFieldsNotNull;
import static google.registry.beam.BeamUtils.extractField;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import google.registry.model.billing.BillingEvent.Flag;
import google.registry.reporting.billing.BillingModule;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.text.DecimalFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.stream.Collectors;
import org.apache.avro.generic.GenericRecord;
import org.apache.beam.sdk.coders.AtomicCoder;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.io.gcp.bigquery.SchemaAndRecord;
/**
* A POJO representing a single billable event, parsed from a {@code SchemaAndRecord}.
*
* <p>This is a trivially serializable class that allows Beam to transform the results of a Bigquery
* query into a standard Java representation, giving us the type guarantees and ease of manipulation
* Bigquery lacks, while localizing any Bigquery-side failures to the {@link #parseFromRecord}
* function.
*/
@AutoValue
public abstract class BillingEvent implements Serializable {
private static final DateTimeFormatter DATE_TIME_FORMATTER =
DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss zzz");
  /** The multiplier we apply to the price of sunrise creates; currently a 15% discount. */
private static final double SUNRISE_DISCOUNT_PRICE_MODIFIER = 0.85;
private static final ImmutableList<String> FIELD_NAMES =
ImmutableList.of(
"id",
"billingTime",
"eventTime",
"registrarId",
"billingId",
"poNumber",
"tld",
"action",
"domain",
"repositoryId",
"years",
"currency",
"amount",
"flags");
/** Returns the unique Objectify ID for the {@code OneTime} associated with this event. */
abstract long id();
/** Returns the UTC DateTime this event becomes billable. */
abstract ZonedDateTime billingTime();
/** Returns the UTC DateTime this event was generated. */
abstract ZonedDateTime eventTime();
/** Returns the billed registrar's name. */
abstract String registrarId();
/** Returns the billed registrar's billing account key. */
abstract String billingId();
/** Returns the Purchase Order number. */
abstract String poNumber();
/** Returns the tld this event was generated for. */
abstract String tld();
  /** Returns the billable action this event was generated for (e.g. RENEW, CREATE, TRANSFER). */
abstract String action();
/** Returns the fully qualified domain name this event was generated for. */
abstract String domain();
/** Returns the unique RepoID associated with the billed domain. */
abstract String repositoryId();
/** Returns the number of years this billing event is made out for. */
abstract int years();
  /** Returns the 3-letter currency code for the billing event (e.g. USD or JPY). */
abstract String currency();
/** Returns the cost associated with this billing event. */
abstract double amount();
/** Returns a list of space-delimited flags associated with the event. */
abstract String flags();
/**
* Constructs a {@code BillingEvent} from a {@code SchemaAndRecord}.
*
* @see <a
* href=http://avro.apache.org/docs/1.7.7/api/java/org/apache/avro/generic/GenericData.Record.html>
* Apache AVRO GenericRecord</a>
*/
static BillingEvent parseFromRecord(SchemaAndRecord schemaAndRecord) {
checkFieldsNotNull(FIELD_NAMES, schemaAndRecord);
GenericRecord record = schemaAndRecord.getRecord();
String flags = extractField(record, "flags");
double amount = getDiscountedAmount(Double.parseDouble(extractField(record, "amount")), flags);
return create(
// We need to chain parsers off extractField because GenericRecord only returns
// Objects, which contain a string representation of their underlying types.
Long.parseLong(extractField(record, "id")),
// Bigquery provides UNIX timestamps with microsecond precision.
Instant.ofEpochMilli(Long.parseLong(extractField(record, "billingTime")) / 1000)
.atZone(ZoneId.of("UTC")),
Instant.ofEpochMilli(Long.parseLong(extractField(record, "eventTime")) / 1000)
.atZone(ZoneId.of("UTC")),
extractField(record, "registrarId"),
extractField(record, "billingId"),
extractField(record, "poNumber"),
extractField(record, "tld"),
extractField(record, "action"),
extractField(record, "domain"),
extractField(record, "repositoryId"),
Integer.parseInt(extractField(record, "years")),
extractField(record, "currency"),
amount,
flags);
}
/**
* Applies a discount to sunrise creates and anchor tenant creates if applicable.
*
* <p>Currently, sunrise creates are discounted 15% and anchor tenant creates are free for 2 years.
* All anchor tenant creates are enforced to be 2 years in
* {@link google.registry.flows.domain.DomainCreateFlow#verifyAnchorTenantValidPeriod}.
*/
private static double getDiscountedAmount(double amount, String flags) {
// Apply a configurable discount to sunrise creates.
if (flags.contains(Flag.SUNRISE.name())) {
amount =
Double.parseDouble(
new DecimalFormat("#.##").format(amount * SUNRISE_DISCOUNT_PRICE_MODIFIER));
}
// Anchor tenant creates are free for the initial create. This is enforced to be a 2-year period
// upon domain create.
if (flags.contains(Flag.ANCHOR_TENANT.name())) {
amount = 0;
}
return amount;
}
/**
* Creates a concrete {@code BillingEvent}.
*
* <p>This should only be used outside this class for testing; instances of {@code BillingEvent}
* should otherwise come from {@link #parseFromRecord}.
*/
@VisibleForTesting
static BillingEvent create(
long id,
ZonedDateTime billingTime,
ZonedDateTime eventTime,
String registrarId,
String billingId,
String poNumber,
String tld,
String action,
String domain,
String repositoryId,
int years,
String currency,
double amount,
String flags) {
return new AutoValue_BillingEvent(
id,
billingTime,
eventTime,
registrarId,
billingId,
poNumber,
tld,
action,
domain,
repositoryId,
years,
currency,
amount,
flags);
}
static String getHeader() {
return FIELD_NAMES.stream().collect(Collectors.joining(","));
}
/**
* Generates the filename associated with this {@code BillingEvent}.
*
* <p>When modifying this function, take care to ensure that there's no way to generate an illegal
* filepath with the arguments, such as "../sensitive_info".
*/
String toFilename(String yearMonth) {
return String.format(
"%s_%s_%s_%s", BillingModule.DETAIL_REPORT_PREFIX, yearMonth, registrarId(), tld());
}
/** Generates a CSV representation of this {@code BillingEvent}. */
String toCsv() {
return Joiner.on(",")
.join(
ImmutableList.of(
id(),
DATE_TIME_FORMATTER.format(billingTime()),
DATE_TIME_FORMATTER.format(eventTime()),
registrarId(),
billingId(),
tld(),
action(),
domain(),
repositoryId(),
years(),
currency(),
String.format("%.2f", amount()),
// Strip out the 'synthetic' flag, which is internal only.
flags().replace("SYNTHETIC", "").trim()));
}
/** Returns the grouping key for this {@code BillingEvent}, to generate the overall invoice. */
InvoiceGroupingKey getInvoiceGroupingKey() {
return new AutoValue_BillingEvent_InvoiceGroupingKey(
billingTime().toLocalDate().withDayOfMonth(1).toString(),
billingTime().toLocalDate().withDayOfMonth(1).plusYears(years()).minusDays(1).toString(),
billingId(),
String.format("%s - %s", registrarId(), tld()),
String.format("%s | TLD: %s | TERM: %d-year", action(), tld(), years()),
amount(),
currency(),
poNumber());
}
/** Key for each {@code BillingEvent}, when aggregating for the overall invoice. */
@AutoValue
abstract static class InvoiceGroupingKey implements Serializable {
private static final ImmutableList<String> INVOICE_HEADERS =
ImmutableList.of(
"StartDate",
"EndDate",
"ProductAccountKey",
"Amount",
"AmountCurrency",
"BillingProductCode",
"SalesChannel",
"LineItemType",
"UsageGroupingKey",
"Quantity",
"Description",
"UnitPrice",
"UnitPriceCurrency",
"PONumber");
/** Returns the first day this invoice is valid, in yyyy-MM-dd format. */
abstract String startDate();
/** Returns the last day this invoice is valid, in yyyy-MM-dd format. */
abstract String endDate();
/** Returns the billing account id, which is the {@code BillingEvent.billingId}. */
abstract String productAccountKey();
/** Returns the invoice grouping key, which is in the format "registrarId - tld". */
abstract String usageGroupingKey();
/** Returns a description of the item, formatted as "action | TLD: tld | TERM: n-year." */
abstract String description();
/** Returns the cost per invoice item. */
abstract Double unitPrice();
/** Returns the 3-letter currency code the unit price uses. */
abstract String unitPriceCurrency();
/** Returns the purchase order number for the item, blank for most registrars. */
abstract String poNumber();
/** Generates the CSV header for the overall invoice. */
static String invoiceHeader() {
return Joiner.on(",").join(INVOICE_HEADERS);
}
/** Generates a CSV representation of n aggregate billing events. */
String toCsv(Long quantity) {
double totalPrice = unitPrice() * quantity;
return Joiner.on(",")
.join(
ImmutableList.of(
startDate(),
endDate(),
productAccountKey(),
String.format("%.2f", totalPrice),
unitPriceCurrency(),
"10125",
"1",
"PURCHASE",
usageGroupingKey(),
String.format("%d", quantity),
description(),
String.format("%.2f", unitPrice()),
unitPriceCurrency(),
poNumber()));
}
/** Coder that provides deterministic (de)serialization for {@code InvoiceGroupingKey}. */
static class InvoiceGroupingKeyCoder extends AtomicCoder<InvoiceGroupingKey> {
@Override
public void encode(InvoiceGroupingKey value, OutputStream outStream) throws IOException {
Coder<String> stringCoder = StringUtf8Coder.of();
stringCoder.encode(value.startDate(), outStream);
stringCoder.encode(value.endDate(), outStream);
stringCoder.encode(value.productAccountKey(), outStream);
stringCoder.encode(value.usageGroupingKey(), outStream);
stringCoder.encode(value.description(), outStream);
stringCoder.encode(String.valueOf(value.unitPrice()), outStream);
stringCoder.encode(value.unitPriceCurrency(), outStream);
stringCoder.encode(value.poNumber(), outStream);
}
@Override
public InvoiceGroupingKey decode(InputStream inStream) throws IOException {
Coder<String> stringCoder = StringUtf8Coder.of();
return new AutoValue_BillingEvent_InvoiceGroupingKey(
stringCoder.decode(inStream),
stringCoder.decode(inStream),
stringCoder.decode(inStream),
stringCoder.decode(inStream),
stringCoder.decode(inStream),
Double.parseDouble(stringCoder.decode(inStream)),
stringCoder.decode(inStream),
stringCoder.decode(inStream));
}
}
}
}
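
For reference, a minimal standalone sketch of the discount arithmetic in getDiscountedAmount above. The 0.85 modifier and the DecimalFormat("#.##") rounding come straight from the class; the demo class name and the sample amount are invented for illustration.

import java.text.DecimalFormat;

public class SunriseDiscountDemo {
  private static final double SUNRISE_DISCOUNT_PRICE_MODIFIER = 0.85;

  public static void main(String[] args) {
    double amount = 17.99; // hypothetical pre-discount create price
    // Mirror BillingEvent.getDiscountedAmount: apply the modifier, then round
    // to two decimal places by formatting and re-parsing.
    double discounted =
        Double.parseDouble(
            new DecimalFormat("#.##").format(amount * SUNRISE_DISCOUNT_PRICE_MODIFIER));
    System.out.println(discounted); // 15.29
  }
}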

View file

@ -0,0 +1,209 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.invoicing;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.auth.oauth2.GoogleCredentials;
import google.registry.beam.invoicing.BillingEvent.InvoiceGroupingKey;
import google.registry.beam.invoicing.BillingEvent.InvoiceGroupingKey.InvoiceGroupingKeyCoder;
import google.registry.config.CredentialModule.LocalCredentialJson;
import google.registry.config.RegistryConfig.Config;
import google.registry.reporting.billing.BillingModule;
import google.registry.reporting.billing.GenerateInvoicesAction;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.Serializable;
import javax.inject.Inject;
import org.apache.beam.runners.dataflow.DataflowRunner;
import org.apache.beam.runners.dataflow.options.DataflowPipelineOptions;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.coders.SerializableCoder;
import org.apache.beam.sdk.io.DefaultFilenamePolicy.Params;
import org.apache.beam.sdk.io.FileBasedSink;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.options.ValueProvider;
import org.apache.beam.sdk.options.ValueProvider.NestedValueProvider;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Filter;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.TypeDescriptor;
import org.apache.beam.sdk.values.TypeDescriptors;
/**
* Definition of a Dataflow pipeline template, which generates a given month's invoices.
*
* <p>To stage this template on GCS, run the {@link
* google.registry.tools.DeployInvoicingPipelineCommand} Nomulus command.
*
* <p>Then, you can run the staged template via the API client library, gcloud, or a raw REST call.
* For an example using the API client library, see {@link GenerateInvoicesAction}.
*
* @see <a href="https://cloud.google.com/dataflow/docs/templates/overview">Dataflow Templates</a>
*/
public class InvoicingPipeline implements Serializable {
@Inject
@Config("projectId")
String projectId;
@Inject
@Config("apacheBeamBucketUrl")
String beamBucketUrl;
@Inject
@Config("invoiceTemplateUrl")
String invoiceTemplateUrl;
@Inject
@Config("beamStagingUrl")
String beamStagingUrl;
@Inject
@Config("billingBucketUrl")
String billingBucketUrl;
@Inject
@Config("invoiceFilePrefix")
String invoiceFilePrefix;
@Inject @LocalCredentialJson String credentialJson;
@Inject
InvoicingPipeline() {}
/** Custom options for running the invoicing pipeline. */
interface InvoicingPipelineOptions extends DataflowPipelineOptions {
/** Returns the yearMonth we're generating invoices for, in yyyy-MM format. */
@Description("The yearMonth we generate invoices for, in yyyy-MM format.")
ValueProvider<String> getYearMonth();
/**
* Sets the yearMonth we generate invoices for.
*
* <p>This is implicitly set when executing the Dataflow template, by specifying the "yearMonth"
* parameter.
*/
void setYearMonth(ValueProvider<String> value);
}
/** Deploys the invoicing pipeline as a template on GCS, for a given projectID and GCS bucket. */
public void deploy() {
// We can't store options as a member variable due to serialization concerns.
InvoicingPipelineOptions options = PipelineOptionsFactory.as(InvoicingPipelineOptions.class);
try {
options.setGcpCredential(
GoogleCredentials.fromStream(new ByteArrayInputStream(credentialJson.getBytes(UTF_8))));
} catch (IOException e) {
throw new RuntimeException(
"Cannot obtain local credential to deploy the invoicing pipeline", e);
}
options.setProject(projectId);
options.setRunner(DataflowRunner.class);
// This causes p.run() to stage the pipeline as a template on GCS, as opposed to running it.
options.setTemplateLocation(invoiceTemplateUrl);
options.setStagingLocation(beamStagingUrl);
Pipeline p = Pipeline.create(options);
PCollection<BillingEvent> billingEvents =
p.apply(
"Read BillingEvents from Bigquery",
BigQueryIO.read(BillingEvent::parseFromRecord)
.fromQuery(InvoicingUtils.makeQueryProvider(options.getYearMonth(), projectId))
.withCoder(SerializableCoder.of(BillingEvent.class))
.usingStandardSql()
.withoutValidation()
.withTemplateCompatibility());
applyTerminalTransforms(billingEvents, options.getYearMonth());
p.run();
}
/**
* Applies output transforms to the {@code BillingEvent} source collection.
*
* <p>This is factored out purely to facilitate testing.
*/
void applyTerminalTransforms(
PCollection<BillingEvent> billingEvents, ValueProvider<String> yearMonthProvider) {
billingEvents
.apply("Generate overall invoice rows", new GenerateInvoiceRows())
.apply("Write overall invoice to CSV", writeInvoice(yearMonthProvider));
billingEvents.apply(
"Write detail reports to separate CSVs keyed by registrarId_tld pair",
writeDetailReports(yearMonthProvider));
}
/** Transform that converts a {@code BillingEvent} into an invoice CSV row. */
private static class GenerateInvoiceRows
extends PTransform<PCollection<BillingEvent>, PCollection<String>> {
@Override
public PCollection<String> expand(PCollection<BillingEvent> input) {
return input
.apply(
"Map to invoicing key",
MapElements.into(TypeDescriptor.of(InvoiceGroupingKey.class))
.via(BillingEvent::getInvoiceGroupingKey))
.apply(Filter.by((InvoiceGroupingKey key) -> key.unitPrice() != 0))
.setCoder(new InvoiceGroupingKeyCoder())
.apply("Count occurrences", Count.perElement())
.apply(
"Format as CSVs",
MapElements.into(TypeDescriptors.strings())
.via((KV<InvoiceGroupingKey, Long> kv) -> kv.getKey().toCsv(kv.getValue())));
}
}
/** Returns an IO transform that writes the overall invoice to a single CSV file. */
private TextIO.Write writeInvoice(ValueProvider<String> yearMonthProvider) {
return TextIO.write()
.to(
NestedValueProvider.of(
yearMonthProvider,
yearMonth ->
String.format(
"%s/%s/%s/%s-%s",
billingBucketUrl,
BillingModule.INVOICES_DIRECTORY,
yearMonth,
invoiceFilePrefix,
yearMonth)))
.withHeader(InvoiceGroupingKey.invoiceHeader())
.withoutSharding()
.withSuffix(".csv");
}
/** Returns an IO transform that writes detail reports to registrar-tld keyed CSV files. */
private TextIO.TypedWrite<BillingEvent, Params> writeDetailReports(
ValueProvider<String> yearMonthProvider) {
return TextIO.<BillingEvent>writeCustomType()
.to(
InvoicingUtils.makeDestinationFunction(
String.format("%s/%s", billingBucketUrl, BillingModule.INVOICES_DIRECTORY),
yearMonthProvider),
InvoicingUtils.makeEmptyDestinationParams(billingBucketUrl + "/errors"))
.withFormatFunction(BillingEvent::toCsv)
.withoutSharding()
.withTempDirectory(
FileBasedSink.convertToFileResourceIfPossible(beamBucketUrl + "/temporary"))
.withHeader(BillingEvent.getHeader())
.withSuffix(".csv");
}
}
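
To make the writeInvoice destination concrete, here is a sketch of the path that the NestedValueProvider above produces. The format string is copied from writeInvoice; the bucket URL, the prefix, and the stand-in for BillingModule.INVOICES_DIRECTORY (whose actual value is not shown in this file) are hypothetical.

public class InvoicePathDemo {
  public static void main(String[] args) {
    String billingBucketUrl = "gs://example-billing"; // hypothetical
    String invoicesDirectory = "invoices"; // stand-in for BillingModule.INVOICES_DIRECTORY
    String invoiceFilePrefix = "CRR-INV"; // hypothetical
    String yearMonth = "2018-07";
    String path =
        String.format(
            "%s/%s/%s/%s-%s",
            billingBucketUrl, invoicesDirectory, yearMonth, invoiceFilePrefix, yearMonth);
    System.out.println(path); // gs://example-billing/invoices/2018-07/CRR-INV-2018-07
  }
}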

View file

@ -0,0 +1,106 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.invoicing;
import static google.registry.beam.BeamUtils.getQueryFromFile;
import google.registry.util.SqlTemplate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.YearMonth;
import java.time.format.DateTimeFormatter;
import org.apache.beam.sdk.io.DefaultFilenamePolicy.Params;
import org.apache.beam.sdk.io.FileBasedSink;
import org.apache.beam.sdk.options.ValueProvider;
import org.apache.beam.sdk.options.ValueProvider.NestedValueProvider;
import org.apache.beam.sdk.transforms.SerializableFunction;
/** Pipeline helper functions used to generate invoices from instances of {@link BillingEvent}. */
public class InvoicingUtils {
private InvoicingUtils() {}
private static final DateTimeFormatter TIMESTAMP_FORMATTER =
DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS");
/**
* Returns a function mapping from {@code BillingEvent} to filename {@code Params}.
*
* <p>Beam uses this to determine which file a given {@code BillingEvent} should get placed into.
*
* @param outputBucket the GCS bucket we're outputting reports to
* @param yearMonthProvider a runtime provider for the yyyy-MM we're generating the invoice for
*/
static SerializableFunction<BillingEvent, Params> makeDestinationFunction(
String outputBucket, ValueProvider<String> yearMonthProvider) {
return billingEvent ->
new Params()
.withShardTemplate("")
.withSuffix(".csv")
.withBaseFilename(
NestedValueProvider.of(
yearMonthProvider,
yearMonth ->
FileBasedSink.convertToFileResourceIfPossible(
String.format(
"%s/%s/%s",
outputBucket, yearMonth, billingEvent.toFilename(yearMonth)))));
}
/**
* Returns the default filename parameters for an unmappable {@code BillingEvent}.
*
* <p>The "failed" file should only be populated when an error occurs, which warrants further
* investigation.
*/
static Params makeEmptyDestinationParams(String outputBucket) {
return new Params()
.withBaseFilename(
FileBasedSink.convertToFileResourceIfPossible(
String.format("%s/%s", outputBucket, "FAILURES")));
}
/**
* Returns a provider that creates a Bigquery query for a given project and yearMonth at runtime.
*
* <p>We only know yearMonth at runtime, so this provider fills in the {@code
* sql/billing_events.sql} template at runtime.
*
* @param yearMonthProvider a runtime provider that returns which month we're invoicing for.
* @param projectId the projectId we're generating invoicing for.
*/
static ValueProvider<String> makeQueryProvider(
ValueProvider<String> yearMonthProvider, String projectId) {
return NestedValueProvider.of(
yearMonthProvider,
(yearMonth) -> {
// Get the timestamp endpoints capturing the entire month with microsecond precision
YearMonth reportingMonth = YearMonth.parse(yearMonth);
LocalDateTime firstMoment = reportingMonth.atDay(1).atTime(LocalTime.MIDNIGHT);
LocalDateTime lastMoment = reportingMonth.atEndOfMonth().atTime(LocalTime.MAX);
// Construct the month's query by filling in the billing_events.sql template
return SqlTemplate.create(getQueryFromFile(InvoicingPipeline.class, "billing_events.sql"))
.put("FIRST_TIMESTAMP_OF_MONTH", firstMoment.format(TIMESTAMP_FORMATTER))
.put("LAST_TIMESTAMP_OF_MONTH", lastMoment.format(TIMESTAMP_FORMATTER))
.put("PROJECT_ID", projectId)
.put("DATASTORE_EXPORT_DATA_SET", "latest_datastore_export")
.put("ONETIME_TABLE", "OneTime")
.put("REGISTRY_TABLE", "Registry")
.put("REGISTRAR_TABLE", "Registrar")
.put("CANCELLATION_TABLE", "Cancellation")
.build();
});
}
}
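
A runnable sketch of the timestamp-endpoint computation in makeQueryProvider above, using the same java.time calls and formatter pattern; only the sample yearMonth is invented.

import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.YearMonth;
import java.time.format.DateTimeFormatter;

public class MonthEndpointsDemo {
  private static final DateTimeFormatter TIMESTAMP_FORMATTER =
      DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS");

  public static void main(String[] args) {
    YearMonth reportingMonth = YearMonth.parse("2018-07");
    // First and last moments of the month, at microsecond precision.
    LocalDateTime firstMoment = reportingMonth.atDay(1).atTime(LocalTime.MIDNIGHT);
    LocalDateTime lastMoment = reportingMonth.atEndOfMonth().atTime(LocalTime.MAX);
    System.out.println(firstMoment.format(TIMESTAMP_FORMATTER)); // 2018-07-01 00:00:00.000000
    System.out.println(lastMoment.format(TIMESTAMP_FORMATTER)); // 2018-07-31 23:59:59.999999
  }
}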

View file

@ -0,0 +1,102 @@
#standardSQL
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- This query gathers all non-canceled billing events for a given
-- YEAR_MONTH in yyyy-MM format.
SELECT
__key__.id AS id,
billingTime,
eventTime,
BillingEvent.clientId AS registrarId,
RegistrarData.accountId AS billingId,
RegistrarData.poNumber AS poNumber,
tld,
reason as action,
targetId as domain,
BillingEvent.domainRepoId as repositoryId,
periodYears as years,
BillingEvent.currency AS currency,
BillingEvent.amount as amount,
-- We'll strip out non-useful flags downstream
ARRAY_TO_STRING(flags, " ") AS flags
FROM (
SELECT
*,
-- We store cost as "CURRENCY AMOUNT" such as "JPY 800" or "USD 20.00"
SPLIT(cost, ' ')[OFFSET(0)] AS currency,
SPLIT(cost, ' ')[OFFSET(1)] AS amount,
-- Extract everything after the first dot in the domain as the TLD
REGEXP_EXTRACT(targetId, r'[.](.+)') AS tld,
-- __key__.path looks like '"DomainBase", "<repoId>", ...'
REGEXP_REPLACE(SPLIT(__key__.path, ', ')[OFFSET(1)], '"', '')
AS domainRepoId,
COALESCE(cancellationMatchingBillingEvent.path,
__key__.path) AS cancellationMatchingPath
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATA_SET%.%ONETIME_TABLE%`
-- Only include real TLDs (filter prober data)
WHERE
REGEXP_EXTRACT(targetId, r'[.](.+)') IN (
SELECT
tldStr
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATA_SET%.%REGISTRY_TABLE%`
WHERE
-- TODO(b/18092292): Add a filter for tldState (not PDT/PREDELEGATION)
tldType = 'REAL') ) AS BillingEvent
-- Gather billing ID from registrar table
-- This is a 'JOIN' as opposed to 'LEFT JOIN' to filter out
-- non-billable registrars
JOIN (
SELECT
__key__.name AS clientId,
billingIdentifier,
IFNULL(poNumber, '') AS poNumber,
r.billingAccountMap.currency[SAFE_OFFSET(index)] AS currency,
r.billingAccountMap.accountId[SAFE_OFFSET(index)] AS accountId
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATA_SET%.%REGISTRAR_TABLE%` AS r,
UNNEST(GENERATE_ARRAY(0, ARRAY_LENGTH(r.billingAccountMap.currency) - 1))
AS index
WHERE billingAccountMap IS NOT NULL
AND type = 'REAL') AS RegistrarData
ON
BillingEvent.clientId = RegistrarData.clientId
AND BillingEvent.currency = RegistrarData.currency
-- Gather cancellations
LEFT JOIN (
SELECT __key__.id AS cancellationId,
COALESCE(refOneTime.path, refRecurring.path) AS cancelledEventPath,
eventTime as cancellationTime,
billingTime as cancellationBillingTime
FROM
(SELECT
*,
-- Count everything after first dot as TLD (to support multi-part TLDs).
REGEXP_EXTRACT(targetId, r'[.](.+)') AS tld
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATA_SET%.%CANCELLATION_TABLE%`)
) AS Cancellation
ON BillingEvent.cancellationMatchingPath = Cancellation.cancelledEventPath
AND BillingEvent.billingTime = Cancellation.cancellationBillingTime
WHERE billingTime BETWEEN TIMESTAMP('%FIRST_TIMESTAMP_OF_MONTH%')
AND TIMESTAMP('%LAST_TIMESTAMP_OF_MONTH%')
-- Filter out canceled events
AND Cancellation.cancellationId IS NULL
ORDER BY
billingTime DESC,
id,
tld
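
The query above depends on the "CURRENCY AMOUNT" cost format called out in its comment. As a sanity check, here is a minimal Java equivalent of the SPLIT(cost, ' ') expressions, with a hypothetical sample value:

public class CostSplitDemo {
  public static void main(String[] args) {
    String cost = "USD 20.00"; // stored as "CURRENCY AMOUNT", e.g. "JPY 800"
    String[] parts = cost.split(" ");
    String currency = parts[0]; // SPLIT(cost, ' ')[OFFSET(0)]
    String amount = parts[1]; // SPLIT(cost, ' ')[OFFSET(1)]
    System.out.println(currency + " " + amount); // USD 20.00
  }
}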

View file

@ -0,0 +1,32 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "spec11",
srcs = glob(["*.java"]),
resources = glob(["sql/*"]),
deps = [
"//java/google/registry/beam",
"//java/google/registry/config",
"//java/google/registry/util",
"@com_google_auth_library_oauth2_http",
"@com_google_auto_value",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@javax_inject",
"@joda_time",
"@org_apache_avro",
"@org_apache_beam_runners_direct_java",
"@org_apache_beam_runners_google_cloud_dataflow_java",
"@org_apache_beam_sdks_java_core",
"@org_apache_beam_sdks_java_io_google_cloud_platform",
"@org_apache_httpcomponents_httpclient",
"@org_apache_httpcomponents_httpcore",
"@org_json",
],
)

View file

@ -0,0 +1,248 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.spec11;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.http.HttpStatus.SC_OK;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.google.common.io.CharStreams;
import google.registry.util.Retrier;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.net.URISyntaxException;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.beam.sdk.options.ValueProvider;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
import org.apache.beam.sdk.values.KV;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.HTTP;
import org.joda.time.Instant;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/** Utilities and Beam {@code PTransforms} for interacting with the SafeBrowsing API. */
public class SafeBrowsingTransforms {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
/** The URL to send SafeBrowsing API calls (POSTS) to. */
private static final String SAFE_BROWSING_URL =
"https://safebrowsing.googleapis.com/v4/threatMatches:find";
/**
* {@link DoFn} mapping a {@link Subdomain} to its evaluation report from SafeBrowsing.
*
* <p>Refer to the Lookup API documentation for the request/response format and other details.
*
* @see <a href=https://developers.google.com/safe-browsing/v4/lookup-api>Lookup API</a>
*/
static class EvaluateSafeBrowsingFn extends DoFn<Subdomain, KV<Subdomain, ThreatMatch>> {
/**
* Max number of urls we can check in a single query.
*
* <p>The actual max is 500, but we leave a small gap in case of concurrency errors.
*/
private static final int BATCH_SIZE = 490;
/** Provides the SafeBrowsing API key at runtime. */
private final ValueProvider<String> apiKeyProvider;
/**
* Maps a subdomain's {@code fullyQualifiedDomainName} to its corresponding {@link Subdomain} to
* facilitate batching SafeBrowsing API requests.
*/
private final Map<String, Subdomain> subdomainBuffer = new LinkedHashMap<>(BATCH_SIZE);
/**
* Provides the HTTP client we use to interact with the SafeBrowsing API.
*
* <p>This is a supplier to enable mocking out the connection in unit tests while maintaining a
* serializable field.
*/
private final Supplier<CloseableHttpClient> closeableHttpClientSupplier;
/** Retries on receiving transient failures such as {@link IOException}. */
private final Retrier retrier;
/**
* Constructs an {@link EvaluateSafeBrowsingFn} that gets its API key from the given provider.
*
* <p>We need to dual-cast the closeableHttpClientSupplier lambda because all {@code DoFn}
* member variables need to be serializable. The (Supplier & Serializable) dual cast is safe
* because class methods are generally serializable, especially a static function such as {@link
* HttpClients#createDefault()}.
*
* @param apiKeyProvider provides the SafeBrowsing API key from {@code KMS} at runtime
*/
@SuppressWarnings("unchecked")
EvaluateSafeBrowsingFn(ValueProvider<String> apiKeyProvider, Retrier retrier) {
this.apiKeyProvider = apiKeyProvider;
this.retrier = retrier;
this.closeableHttpClientSupplier = (Supplier & Serializable) HttpClients::createDefault;
}
/**
* Constructs an {@link EvaluateSafeBrowsingFn}, allowing us to swap out the HTTP client supplier
* for testing.
*
* @param clientSupplier a serializable CloseableHttpClient supplier
*/
@VisibleForTesting
EvaluateSafeBrowsingFn(
ValueProvider<String> apiKeyProvider,
Retrier retrier,
Supplier<CloseableHttpClient> clientSupplier) {
this.apiKeyProvider = apiKeyProvider;
this.retrier = retrier;
this.closeableHttpClientSupplier = clientSupplier;
}
/** Evaluates any buffered {@link Subdomain} objects upon completing the bundle. */
@FinishBundle
public void finishBundle(FinishBundleContext context) {
if (!subdomainBuffer.isEmpty()) {
ImmutableSet<KV<Subdomain, ThreatMatch>> results = evaluateAndFlush();
results.forEach((kv) -> context.output(kv, Instant.now(), GlobalWindow.INSTANCE));
}
}
/**
* Buffers {@link Subdomain} objects until we reach the batch size, then bulk-evaluate the URLs
* with the SafeBrowsing API.
*/
@ProcessElement
public void processElement(ProcessContext context) {
Subdomain subdomain = context.element();
subdomainBuffer.put(subdomain.fullyQualifiedDomainName(), subdomain);
if (subdomainBuffer.size() >= BATCH_SIZE) {
ImmutableSet<KV<Subdomain, ThreatMatch>> results = evaluateAndFlush();
results.forEach(context::output);
}
}
/**
* Evaluates all {@link Subdomain} objects in the buffer and returns a list of key-value pairs
* from {@link Subdomain} to its SafeBrowsing report.
*
* <p>If a {@link Subdomain} is safe according to the API, it will not emit a report.
*/
private ImmutableSet<KV<Subdomain, ThreatMatch>> evaluateAndFlush() {
ImmutableSet.Builder<KV<Subdomain, ThreatMatch>> resultBuilder = new ImmutableSet.Builder<>();
try {
URIBuilder uriBuilder = new URIBuilder(SAFE_BROWSING_URL);
// Add the API key param
uriBuilder.addParameter("key", apiKeyProvider.get());
HttpPost httpPost = new HttpPost(uriBuilder.build());
httpPost.addHeader(HTTP.CONTENT_TYPE, ContentType.APPLICATION_JSON.toString());
JSONObject requestBody = createRequestBody();
httpPost.setEntity(new ByteArrayEntity(requestBody.toString().getBytes(UTF_8)));
// Retry transient exceptions such as IOException
retrier.callWithRetry(
() -> {
try (CloseableHttpClient client = closeableHttpClientSupplier.get();
CloseableHttpResponse response = client.execute(httpPost)) {
processResponse(response, resultBuilder);
}
},
IOException.class);
} catch (URISyntaxException | JSONException e) {
// Fail the pipeline on a parsing exception; this indicates the API likely changed.
throw new RuntimeException("Caught parsing exception, failing pipeline.", e);
} finally {
// Flush the buffer
subdomainBuffer.clear();
}
return resultBuilder.build();
}
/** Creates a JSON object matching the request format for the SafeBrowsing API. */
private JSONObject createRequestBody() throws JSONException {
// Accumulate all domain names to evaluate.
JSONArray threatArray = new JSONArray();
for (String fullyQualifiedDomainName : subdomainBuffer.keySet()) {
threatArray.put(new JSONObject().put("url", fullyQualifiedDomainName));
}
// Construct the JSON request body
return new JSONObject()
.put(
"client",
new JSONObject().put("clientId", "domainregistry").put("clientVersion", "0.0.1"))
.put(
"threatInfo",
new JSONObject()
.put(
"threatTypes",
new JSONArray()
.put("MALWARE")
.put("SOCIAL_ENGINEERING")
.put("UNWANTED_SOFTWARE"))
.put("platformTypes", new JSONArray().put("ANY_PLATFORM"))
.put("threatEntryTypes", new JSONArray().put("URL"))
.put("threatEntries", threatArray));
}
/**
* Iterates through all threat matches in the API response and adds them to the {@code
* resultBuilder}.
*/
private void processResponse(
CloseableHttpResponse response,
ImmutableSet.Builder<KV<Subdomain, ThreatMatch>> resultBuilder)
throws JSONException, IOException {
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode != SC_OK) {
logger.atWarning().log("Got unexpected status code %s from response", statusCode);
} else {
// Unpack the response body
JSONObject responseBody =
new JSONObject(
CharStreams.toString(
new InputStreamReader(response.getEntity().getContent(), UTF_8)));
logger.atInfo().log("Got response: %s", responseBody.toString());
if (responseBody.length() == 0) {
logger.atInfo().log("Response was empty, no threats detected");
} else {
// Emit all Subdomains with their API results.
JSONArray threatMatches = responseBody.getJSONArray("matches");
for (int i = 0; i < threatMatches.length(); i++) {
JSONObject match = threatMatches.getJSONObject(i);
String url = match.getJSONObject("threat").getString("url");
Subdomain subdomain = subdomainBuffer.get(url);
resultBuilder.add(
KV.of(subdomain, ThreatMatch.create(match, subdomain.fullyQualifiedDomainName())));
}
}
}
}
}
}
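
For reference, a standalone sketch of the request body that createRequestBody assembles, shown here for a single buffered domain. The field names and client values mirror the code above; the domain name is hypothetical.

import org.json.JSONArray;
import org.json.JSONObject;

public class SafeBrowsingRequestDemo {
  public static void main(String[] args) throws Exception {
    JSONArray threatArray = new JSONArray().put(new JSONObject().put("url", "example.test"));
    JSONObject requestBody =
        new JSONObject()
            .put(
                "client",
                new JSONObject().put("clientId", "domainregistry").put("clientVersion", "0.0.1"))
            .put(
                "threatInfo",
                new JSONObject()
                    .put(
                        "threatTypes",
                        new JSONArray()
                            .put("MALWARE")
                            .put("SOCIAL_ENGINEERING")
                            .put("UNWANTED_SOFTWARE"))
                    .put("platformTypes", new JSONArray().put("ANY_PLATFORM"))
                    .put("threatEntryTypes", new JSONArray().put("URL"))
                    .put("threatEntries", threatArray));
    // Pretty-print with two-space indentation to inspect the payload.
    System.out.println(requestBody.toString(2));
  }
}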

View file

@ -0,0 +1,240 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.spec11;
import static com.google.common.base.Preconditions.checkArgument;
import static google.registry.beam.BeamUtils.getQueryFromFile;
import com.google.auto.value.AutoValue;
import google.registry.beam.spec11.SafeBrowsingTransforms.EvaluateSafeBrowsingFn;
import google.registry.config.RegistryConfig.Config;
import google.registry.util.Retrier;
import google.registry.util.SqlTemplate;
import java.io.Serializable;
import javax.inject.Inject;
import org.apache.beam.runners.dataflow.DataflowRunner;
import org.apache.beam.runners.dataflow.options.DataflowPipelineOptions;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.coders.SerializableCoder;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.options.ValueProvider;
import org.apache.beam.sdk.options.ValueProvider.NestedValueProvider;
import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.TypeDescriptor;
import org.apache.beam.sdk.values.TypeDescriptors;
import org.joda.time.LocalDate;
import org.joda.time.YearMonth;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/**
* Definition of a Dataflow pipeline template, which generates a given month's spec11 report.
*
* <p>To stage this template on GCS, run the {@link
* google.registry.tools.DeploySpec11PipelineCommand} Nomulus command.
*
* <p>Then, you can run the staged template via the API client library, gcloud, or a raw REST call.
*
* @see <a href="https://cloud.google.com/dataflow/docs/templates/overview">Dataflow Templates</a>
*/
public class Spec11Pipeline implements Serializable {
/**
* Returns the subdirectory spec11 reports reside in for a given local date in yyyy-MM-dd format.
*
* @see google.registry.beam.spec11.Spec11Pipeline
* @see google.registry.reporting.spec11.Spec11EmailUtils
*/
public static String getSpec11ReportFilePath(LocalDate localDate) {
YearMonth yearMonth = new YearMonth(localDate);
return String.format("icann/spec11/%s/SPEC11_MONTHLY_REPORT_%s", yearMonth, localDate);
}
/** The JSON object field into which we put the registrar's e-mail address for Spec11 reports. */
public static final String REGISTRAR_EMAIL_FIELD = "registrarEmailAddress";
/** The JSON object field into which we put the registrar's name for Spec11 reports. */
public static final String REGISTRAR_CLIENT_ID_FIELD = "registrarClientId";
/** The JSON object field into which we put the threat match array for Spec11 reports. */
public static final String THREAT_MATCHES_FIELD = "threatMatches";
@Inject
@Config("projectId")
String projectId;
@Inject
@Config("beamStagingUrl")
String beamStagingUrl;
@Inject
@Config("spec11TemplateUrl")
String spec11TemplateUrl;
@Inject
@Config("reportingBucketUrl")
String reportingBucketUrl;
@Inject Retrier retrier;
@Inject
Spec11Pipeline() {}
/** Custom options for running the spec11 pipeline. */
interface Spec11PipelineOptions extends DataflowPipelineOptions {
/** Returns the local date we're generating the report for, in yyyy-MM-dd format. */
@Description("The local date we generate the report for, in yyyy-MM-dd format.")
ValueProvider<String> getDate();
/**
* Sets the local date we generate the report for.
*
* <p>This is implicitly set when executing the Dataflow template, by specifying the "date"
* parameter.
*/
void setDate(ValueProvider<String> value);
/** Returns the SafeBrowsing API key we use to evaluate subdomain health. */
@Description("The API key we use to access the SafeBrowsing API.")
ValueProvider<String> getSafeBrowsingApiKey();
/**
* Sets the SafeBrowsing API key we use.
*
* <p>This is implicitly set when executing the Dataflow template, by specifying the
* "safeBrowsingApiKey" parameter.
*/
void setSafeBrowsingApiKey(ValueProvider<String> value);
}
/** Deploys the spec11 pipeline as a template on GCS. */
public void deploy() {
// We can't store options as a member variable due to serialization concerns.
Spec11PipelineOptions options = PipelineOptionsFactory.as(Spec11PipelineOptions.class);
options.setProject(projectId);
options.setRunner(DataflowRunner.class);
// This causes p.run() to stage the pipeline as a template on GCS, as opposed to running it.
options.setTemplateLocation(spec11TemplateUrl);
options.setStagingLocation(beamStagingUrl);
Pipeline p = Pipeline.create(options);
PCollection<Subdomain> domains =
p.apply(
"Read active domains from BigQuery",
BigQueryIO.read(Subdomain::parseFromRecord)
.fromQuery(
SqlTemplate.create(getQueryFromFile(Spec11Pipeline.class, "subdomains.sql"))
.put("PROJECT_ID", projectId)
.put("DATASTORE_EXPORT_DATASET", "latest_datastore_export")
.put("REGISTRAR_TABLE", "Registrar")
.put("DOMAIN_BASE_TABLE", "DomainBase")
.build())
.withCoder(SerializableCoder.of(Subdomain.class))
.usingStandardSql()
.withoutValidation()
.withTemplateCompatibility());
evaluateUrlHealth(
domains,
new EvaluateSafeBrowsingFn(options.getSafeBrowsingApiKey(), retrier),
options.getDate());
p.run();
}
/**
* Evaluate each {@link Subdomain} URL via the SafeBrowsing API.
*
* <p>This is factored out to facilitate testing.
*/
void evaluateUrlHealth(
PCollection<Subdomain> domains,
EvaluateSafeBrowsingFn evaluateSafeBrowsingFn,
ValueProvider<String> dateProvider) {
PCollection<KV<Subdomain, ThreatMatch>> subdomains =
domains.apply("Run through SafeBrowsingAPI", ParDo.of(evaluateSafeBrowsingFn));
subdomains
.apply(
"Map registrar client ID to email/ThreatMatch pair",
MapElements.into(
TypeDescriptors.kvs(
TypeDescriptors.strings(), TypeDescriptor.of(EmailAndThreatMatch.class)))
.via(
(KV<Subdomain, ThreatMatch> kv) ->
KV.of(
kv.getKey().registrarClientId(),
EmailAndThreatMatch.create(
kv.getKey().registrarEmailAddress(), kv.getValue()))))
.apply("Group by registrar client ID", GroupByKey.create())
.apply(
"Convert results to JSON format",
MapElements.into(TypeDescriptors.strings())
.via(
(KV<String, Iterable<EmailAndThreatMatch>> kv) -> {
String clientId = kv.getKey();
checkArgument(
kv.getValue().iterator().hasNext(),
String.format(
"Registrar with ID %s had no corresponding threats", clientId));
String email = kv.getValue().iterator().next().email();
JSONObject output = new JSONObject();
try {
output.put(REGISTRAR_CLIENT_ID_FIELD, clientId);
output.put(REGISTRAR_EMAIL_FIELD, email);
JSONArray threatMatchArray = new JSONArray();
for (EmailAndThreatMatch emailAndThreatMatch : kv.getValue()) {
threatMatchArray.put(emailAndThreatMatch.threatMatch().toJSON());
}
output.put(THREAT_MATCHES_FIELD, threatMatchArray);
return output.toString();
} catch (JSONException e) {
throw new RuntimeException(
String.format(
"Encountered an error constructing the JSON for %s", kv.toString()),
e);
}
}))
.apply(
"Output to text file",
TextIO.write()
.to(
NestedValueProvider.of(
dateProvider,
date ->
String.format(
"%s/%s",
reportingBucketUrl,
getSpec11ReportFilePath(LocalDate.parse(date)))))
.withoutSharding()
.withHeader("Map from registrar email / name to detected subdomain threats:"));
}
@AutoValue
abstract static class EmailAndThreatMatch implements Serializable {
abstract String email();
abstract ThreatMatch threatMatch();
static EmailAndThreatMatch create(String email, ThreatMatch threatMatch) {
return new AutoValue_Spec11Pipeline_EmailAndThreatMatch(email, threatMatch);
}
}
}
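
A quick sketch of getSpec11ReportFilePath for a sample date, using the same Joda-Time types as the method above; the date itself is arbitrary.

import org.joda.time.LocalDate;
import org.joda.time.YearMonth;

public class Spec11PathDemo {
  public static void main(String[] args) {
    LocalDate localDate = LocalDate.parse("2018-06-05");
    YearMonth yearMonth = new YearMonth(localDate);
    String path = String.format("icann/spec11/%s/SPEC11_MONTHLY_REPORT_%s", yearMonth, localDate);
    System.out.println(path); // icann/spec11/2018-06/SPEC11_MONTHLY_REPORT_2018-06-05
  }
}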

View file

@ -0,0 +1,77 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.spec11;
import static google.registry.beam.BeamUtils.checkFieldsNotNull;
import static google.registry.beam.BeamUtils.extractField;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import java.io.Serializable;
import org.apache.avro.generic.GenericRecord;
import org.apache.beam.sdk.io.gcp.bigquery.SchemaAndRecord;
/**
* A POJO representing a single subdomain, parsed from a {@code SchemaAndRecord}.
*
* <p>This is a trivially serializable class that allows Beam to transform the results of a Bigquery
* query into a standard Java representation, giving us the type guarantees and ease of manipulation
* Bigquery lacks, while localizing any Bigquery-side failures to the {@link #parseFromRecord}
* function.
*/
@AutoValue
public abstract class Subdomain implements Serializable {
private static final ImmutableList<String> FIELD_NAMES =
ImmutableList.of("fullyQualifiedDomainName", "registrarClientId", "registrarEmailAddress");
/** Returns the fully qualified domain name. */
abstract String fullyQualifiedDomainName();
/** Returns the client ID of the associated registrar for this domain. */
abstract String registrarClientId();
/** Returns the email address of the registrar associated with this domain. */
abstract String registrarEmailAddress();
/**
* Constructs a {@link Subdomain} from an Apache Avro {@code SchemaAndRecord}.
*
* @see <a
* href=http://avro.apache.org/docs/1.7.7/api/java/org/apache/avro/generic/GenericData.Record.html>
* Apache AVRO GenericRecord</a>
*/
static Subdomain parseFromRecord(SchemaAndRecord schemaAndRecord) {
checkFieldsNotNull(FIELD_NAMES, schemaAndRecord);
GenericRecord record = schemaAndRecord.getRecord();
return create(
extractField(record, "fullyQualifiedDomainName"),
extractField(record, "registrarClientId"),
extractField(record, "registrarEmailAddress"));
}
/**
* Creates a concrete {@link Subdomain}.
*
* <p>This should only be used outside this class for testing; instances of {@link Subdomain}
* should otherwise come from {@link #parseFromRecord}.
*/
@VisibleForTesting
static Subdomain create(
String fullyQualifiedDomainName, String registrarClientId, String registrarEmailAddress) {
return new AutoValue_Subdomain(
fullyQualifiedDomainName, registrarClientId, registrarEmailAddress);
}
}

View file

@ -0,0 +1,80 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.beam.spec11;
import com.google.auto.value.AutoValue;
import java.io.Serializable;
import org.json.JSONException;
import org.json.JSONObject;
/** A POJO representing a threat match response from the {@code SafeBrowsing API}. */
@AutoValue
public abstract class ThreatMatch implements Serializable {
private static final String THREAT_TYPE_FIELD = "threatType";
private static final String PLATFORM_TYPE_FIELD = "platformType";
private static final String METADATA_FIELD = "threatEntryMetadata";
private static final String DOMAIN_NAME_FIELD = "fullyQualifiedDomainName";
/** Returns what kind of threat it is (malware, phishing, etc.). */
public abstract String threatType();
/** Returns which platforms it affects (Windows, Linux, etc.). */
abstract String platformType();
/**
* Returns a String representing a JSON Object containing arbitrary metadata associated with this
* threat, or "NONE" if there is no metadata to retrieve.
*
* <p>This ideally would be a {@link JSONObject} type, but can't be due to serialization
* requirements.
*/
abstract String metadata();
/** Returns the fully qualified domain name [SLD].[TLD] of the matched threat. */
public abstract String fullyQualifiedDomainName();
/**
* Constructs a {@link ThreatMatch} by parsing a {@code SafeBrowsing API} response {@link
* JSONObject}.
*
* @throws JSONException when encountering parse errors in the response format
*/
static ThreatMatch create(JSONObject threatMatchJSON, String fullyQualifiedDomainName)
throws JSONException {
return new AutoValue_ThreatMatch(
threatMatchJSON.getString(THREAT_TYPE_FIELD),
threatMatchJSON.getString(PLATFORM_TYPE_FIELD),
threatMatchJSON.has(METADATA_FIELD)
? threatMatchJSON.getJSONObject(METADATA_FIELD).toString()
: "NONE",
fullyQualifiedDomainName);
}
/** Returns a {@link JSONObject} representing a subset of this object's data. */
JSONObject toJSON() throws JSONException {
return new JSONObject()
.put(THREAT_TYPE_FIELD, threatType())
.put(PLATFORM_TYPE_FIELD, platformType())
.put(METADATA_FIELD, metadata())
.put(DOMAIN_NAME_FIELD, fullyQualifiedDomainName());
}
/** Parses a {@link JSONObject} and returns an equivalent {@link ThreatMatch}. */
public static ThreatMatch fromJSON(JSONObject threatMatch) throws JSONException {
return new AutoValue_ThreatMatch(
threatMatch.getString(THREAT_TYPE_FIELD),
threatMatch.getString(PLATFORM_TYPE_FIELD),
threatMatch.getString(METADATA_FIELD),
threatMatch.getString(DOMAIN_NAME_FIELD));
}
}
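
A sketch of round-tripping a match through the public fromJSON entry point above. It assumes this class is on the classpath; the field values are hypothetical, and the keys are the constants defined in the class.

import google.registry.beam.spec11.ThreatMatch;
import org.json.JSONObject;

public class ThreatMatchDemo {
  public static void main(String[] args) throws Exception {
    JSONObject json =
        new JSONObject()
            .put("threatType", "MALWARE")
            .put("platformType", "ANY_PLATFORM")
            .put("threatEntryMetadata", "NONE")
            .put("fullyQualifiedDomainName", "example.test");
    ThreatMatch match = ThreatMatch.fromJSON(json);
    // Prints: MALWARE example.test
    System.out.println(match.threatType() + " " + match.fullyQualifiedDomainName());
  }
}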

View file

@ -0,0 +1,49 @@
#standardSQL
-- Copyright 2018 The Nomulus Authors. All Rights Reserved.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- This query gathers all Subdomains active within a given yearMonth
-- and emits a row containing its fully qualified domain name
-- [SLD].[TLD], the current registrar's name, and the current registrar's
-- email address.
SELECT
domain.fullyQualifiedDomainName AS fullyQualifiedDomainName,
registrar.clientId AS registrarClientId,
COALESCE(registrar.emailAddress, '') AS registrarEmailAddress
FROM ( (
SELECT
fullyQualifiedDomainName,
currentSponsorClientId,
creationTime
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATASET%.%DOMAIN_BASE_TABLE%`
WHERE
-- Only include active registrations
-- Registrations that are active (not deleted) will have null deletionTime
-- because END_OF_TIME is an invalid timestamp in standardSQL
(SAFE_CAST(deletionTime AS STRING) IS NULL
OR deletionTime > CURRENT_TIMESTAMP)) AS domain
JOIN (
SELECT
__key__.name AS clientId,
emailAddress
FROM
`%PROJECT_ID%.%DATASTORE_EXPORT_DATASET%.%REGISTRAR_TABLE%`
WHERE
type = 'REAL') AS registrar
ON
domain.currentSponsorClientId = registrar.clientId)
ORDER BY
creationTime DESC

View file

@ -0,0 +1,27 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "bigquery",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/config",
"//java/google/registry/util",
"@com_google_api_client",
"@com_google_api_client_appengine",
"@com_google_apis_google_api_services_bigquery",
"@com_google_code_findbugs_jsr305",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@com_google_http_client",
"@com_google_http_client_appengine",
"@com_google_http_client_jackson2",
"@javax_inject",
"@joda_time",
],
)

View file

@ -0,0 +1,754 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.base.Verify.verify;
import static com.google.common.util.concurrent.Futures.transform;
import static com.google.common.util.concurrent.Futures.transformAsync;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static google.registry.bigquery.BigqueryUtils.toJobReferenceString;
import static google.registry.config.RegistryConfig.getProjectId;
import static org.joda.time.DateTimeZone.UTC;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.client.http.AbstractInputStreamContent;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.GetQueryResultsResponse;
import com.google.api.services.bigquery.model.Job;
import com.google.api.services.bigquery.model.JobConfiguration;
import com.google.api.services.bigquery.model.JobConfigurationExtract;
import com.google.api.services.bigquery.model.JobConfigurationLoad;
import com.google.api.services.bigquery.model.JobConfigurationQuery;
import com.google.api.services.bigquery.model.JobReference;
import com.google.api.services.bigquery.model.JobStatistics;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableCell;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableRow;
import com.google.api.services.bigquery.model.ViewDefinition;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableTable;
import com.google.common.flogger.FluentLogger;
import com.google.common.io.BaseEncoding;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import google.registry.bigquery.BigqueryUtils.DestinationFormat;
import google.registry.bigquery.BigqueryUtils.SourceFormat;
import google.registry.bigquery.BigqueryUtils.TableType;
import google.registry.bigquery.BigqueryUtils.WriteDisposition;
import google.registry.util.NonFinalForTesting;
import google.registry.util.Sleeper;
import google.registry.util.SqlTemplate;
import google.registry.util.SystemSleeper;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/** Class encapsulating parameters and state for accessing the Bigquery API. */
public class BigqueryConnection implements AutoCloseable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final Duration MIN_POLL_INTERVAL = Duration.millis(500);
@NonFinalForTesting
private static Sleeper sleeper = new SystemSleeper();
/** Default name of the default dataset to use for requests to the API. */
public static final String DEFAULT_DATASET_NAME = "testing";
/** Default dataset to use for storing temporary tables. */
private static final String TEMP_DATASET_NAME = "__temp__";
/** Default time to live for temporary tables. */
private static final Duration TEMP_TABLE_TTL = Duration.standardHours(24);
/** Bigquery client instance wrapped by this class. */
private final Bigquery bigquery;
/** Executor service for bigquery jobs. */
private ListeningExecutorService service;
/** Pseudo-randomness source to use for creating random table names. */
private Random random = new Random();
/** Name of the default dataset to use for inserting tables. */
private String datasetId = DEFAULT_DATASET_NAME;
/** Whether to automatically overwrite existing tables and views. */
private boolean overwrite = false;
/** Duration to wait between polls for job status. */
private Duration pollInterval = Duration.millis(1000);
BigqueryConnection(Bigquery bigquery) {
this.bigquery = bigquery;
}
/** Builder for a {@link BigqueryConnection}, since the latter is immutable once created. */
public static class Builder {
private BigqueryConnection instance;
@Inject
Builder(Bigquery bigquery) {
instance = new BigqueryConnection(bigquery);
}
/**
* The BigqueryConnection takes ownership of this {@link ExecutorService} and will
* shut it down when the BigqueryConnection is closed.
*/
public Builder setExecutorService(ExecutorService executorService) {
instance.service = MoreExecutors.listeningDecorator(executorService);
return this;
}
public Builder setDatasetId(String datasetId) {
instance.datasetId = checkNotNull(datasetId);
return this;
}
public Builder setOverwrite(boolean overwrite) {
instance.overwrite = overwrite;
return this;
}
public Builder setPollInterval(Duration pollInterval) {
checkArgument(
!pollInterval.isShorterThan(MIN_POLL_INTERVAL),
"poll interval must be at least %ldms", MIN_POLL_INTERVAL.getMillis());
instance.pollInterval = pollInterval;
return this;
}
public BigqueryConnection build() {
try {
checkNotNull(instance.service, "Must provide executor service");
instance.initialize();
return instance;
} catch (Throwable e) {
Throwables.throwIfUnchecked(e);
throw new RuntimeException("Cannot initialize BigqueryConnection", e);
} finally {
// Clear the internal instance so you can't accidentally mutate it through this builder.
instance = null;
}
}
}
/**
* Class that wraps a normal Bigquery API Table object to make it immutable from the client side
* and give it additional semantics as a "destination" for load or query jobs, with an overwrite
* flag set by the client upon creation.
*
* <p>Additionally provides encapsulation so that clients of BigqueryConnection don't need to take
* any direct dependencies on Bigquery API classes and can instead use DestinationTable.
*/
public static class DestinationTable {
/** The wrapped Bigquery API Table object. */
private final Table table;
/** The type of this table. */
private final TableType type;
/** The write disposition for jobs writing to this destination table. */
private final WriteDisposition writeDisposition;
/**
* A query to package with this table if the type is VIEW; not immutable but also not visible
* to clients.
*/
private String query;
/** A builder for DestinationTable. */
public static final class Builder {
private final Table table = new Table();
private final TableReference tableRef = new TableReference();
private TableType type = TableType.TABLE;
private WriteDisposition writeDisposition = WriteDisposition.WRITE_EMPTY;
public Builder datasetId(String datasetId) {
tableRef.setDatasetId(datasetId);
return this;
}
public Builder name(String name) {
tableRef.setTableId(name);
return this;
}
public Builder description(String description) {
table.setDescription(description);
return this;
}
public Builder type(TableType type) {
this.type = type;
return this;
}
public Builder timeToLive(Duration duration) {
this.table.setExpirationTime(new DateTime(UTC).plus(duration).getMillis());
return this;
}
public Builder overwrite(boolean overwrite) {
if (overwrite) {
this.writeDisposition = WriteDisposition.WRITE_TRUNCATE;
}
return this;
}
public Builder append(boolean append) {
if (append) {
this.writeDisposition = WriteDisposition.WRITE_APPEND;
}
return this;
}
public DestinationTable build() {
tableRef.setProjectId(getProjectId());
table.setTableReference(tableRef);
checkState(!isNullOrEmpty(table.getTableReference().getDatasetId()));
checkState(!isNullOrEmpty(table.getTableReference().getTableId()));
return new DestinationTable(this);
}
}
/** Constructs a new DestinationTable from its Builder. */
private DestinationTable(Builder b) {
table = b.table.clone();
type = b.type;
writeDisposition = b.writeDisposition;
}
/**
* Stores the provided query with this DestinationTable and returns it; used for packaging
* a query along with the DestinationTable before sending it to the table update logic.
*/
private DestinationTable withQuery(String query) {
checkState(type == TableType.VIEW);
this.query = query;
return this;
}
/** Returns a new copy of the Bigquery API Table object wrapped by this DestinationTable. */
private Table getTable() {
Table tableCopy = table.clone();
if (type == TableType.VIEW) {
tableCopy.setView(new ViewDefinition().setQuery(query));
}
return tableCopy;
}
/** Returns the write disposition that should be used for jobs writing to this table. */
private WriteDisposition getWriteDisposition() {
return writeDisposition;
}
/** Returns a new copy of the TableReference for the Table wrapped by this DestinationTable. */
private TableReference getTableReference() {
return table.getTableReference().clone();
}
/** Returns a string representation of the TableReference for the wrapped table. */
public String getStringReference() {
return tableReferenceToString(table.getTableReference());
}
/** Returns a string representation of the given TableReference. */
private static String tableReferenceToString(TableReference tableRef) {
return String.format(
"%s:%s.%s",
tableRef.getProjectId(),
tableRef.getDatasetId(),
tableRef.getTableId());
}
}
/**
* Initializes the BigqueryConnection object by creating the default dataset and the temporary
* table dataset if they don't already exist.
*/
private BigqueryConnection initialize() throws Exception {
createDatasetIfNeeded(datasetId);
createDatasetIfNeeded(TEMP_DATASET_NAME);
return this;
}
/**
* Closes the BigqueryConnection object by shutting down the executor service. Clients
* should only call this after all ListenableFutures obtained from BigqueryConnection methods
* have resolved; this method does not block on their completion.
*/
@Override
public void close() {
service.shutdown();
}
/** Returns a partially built DestinationTable with the default dataset and overwrite behavior. */
public DestinationTable.Builder buildDestinationTable(String tableName) {
return new DestinationTable.Builder()
.datasetId(datasetId)
.type(TableType.TABLE)
.name(tableName)
.overwrite(overwrite);
}
/**
* Returns a partially built DestinationTable with a randomly generated name under the default
* temporary table dataset, with the default TTL and overwrite behavior.
*/
public DestinationTable.Builder buildTemporaryTable() {
return new DestinationTable.Builder()
.datasetId(TEMP_DATASET_NAME)
.type(TableType.TABLE)
.name(getRandomTableName())
.timeToLive(TEMP_TABLE_TTL)
.overwrite(overwrite);
}
/** Returns a random table name consisting only of the chars {@code [a-v0-9_]}. */
private String getRandomTableName() {
byte[] randBytes = new byte[8]; // 64 bits of randomness ought to be plenty.
random.nextBytes(randBytes);
return "_" + BaseEncoding.base32Hex().lowerCase().omitPadding().encode(randBytes);
}
/**
* Updates the specified Bigquery table to reflect the metadata from the input.
*
* <p>Returns the input DestinationTable. If the specified table does not already exist, it will
* be inserted into the dataset.
*
* <p>Clients can call this function directly to update a table on demand, or can pass it to
* Futures.transform() to update a table produced as the asynchronous result of a load or query
* job (e.g. to add a description to it).
*/
private DestinationTable updateTable(final DestinationTable destinationTable) {
Table table = destinationTable.getTable();
TableReference ref = table.getTableReference();
try {
if (checkTableExists(ref.getDatasetId(), ref.getTableId())) {
// Make sure to use patch() rather than update(). The former changes only those properties
// which are specified, while the latter would change everything, blanking out unspecified
// properties.
bigquery
.tables()
.patch(ref.getProjectId(), ref.getDatasetId(), ref.getTableId(), table)
.execute();
} else {
bigquery.tables().insert(ref.getProjectId(), ref.getDatasetId(), table).execute();
}
return destinationTable;
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Starts an asynchronous load job to populate the specified destination table with the given
* source URIs and source format. Returns a ListenableFuture that holds the same destination
* table object on success.
*/
public ListenableFuture<DestinationTable> load(
DestinationTable dest,
SourceFormat sourceFormat,
Iterable<String> sourceUris) {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setLoad(new JobConfigurationLoad()
.setWriteDisposition(dest.getWriteDisposition().toString())
.setSourceFormat(sourceFormat.toString())
.setSourceUris(ImmutableList.copyOf(sourceUris))
.setDestinationTable(dest.getTableReference())));
return transform(runJobToCompletion(job, dest), this::updateTable, directExecutor());
}
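// Illustrative only: loading newline-delimited JSON files from GCS into a table that may be
// overwritten. The bucket path and table name are hypothetical.
//
//   DestinationTable dest = connection.buildDestinationTable("imports").build();
//   ListenableFuture<DestinationTable> loaded = connection.load(
//       dest, SourceFormat.NEWLINE_DELIMITED_JSON,
//       ImmutableList.of("gs://my-bucket/imports/*.json"));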
/**
* Starts an asynchronous query job to populate the specified destination table with the results
* of the specified query, or if the table is a view, to update the view to reflect that query.
* Returns a ListenableFuture that holds the same destination table object on success.
*/
public ListenableFuture<DestinationTable> query(
String querySql,
DestinationTable dest) {
if (dest.type == TableType.VIEW) {
// Use Futures.transform() rather than calling apply() directly so that any exceptions thrown
// by calling updateTable will be propagated on the get() call, not from here.
return transform(
Futures.immediateFuture(dest.withQuery(querySql)), this::updateTable, directExecutor());
} else {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())
.setWriteDisposition(dest.getWriteDisposition().toString())
.setDestinationTable(dest.getTableReference())));
return transform(runJobToCompletion(job, dest), this::updateTable, directExecutor());
}
}
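// Illustrative only: the same entry point covers both cases. Materializing a query into a
// table runs an asynchronous job, while targeting a VIEW just rewrites the view definition.
// The table, view, and dataset names are hypothetical.
//
//   DestinationTable snapshot = connection.buildDestinationTable("daily_snapshot").build();
//   connection.query("SELECT * FROM [logs.requests]", snapshot);
//
//   DestinationTable view =
//       connection.buildDestinationTable("latest_view").type(TableType.VIEW).build();
//   connection.query("SELECT * FROM [reporting.daily_snapshot]", view);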
/**
* Starts an asynchronous query job to dump the results of the specified query into a local
* ImmutableTable object, row-keyed by the row number (indexed from 1), column-keyed by the
* TableFieldSchema for that column, and with the value object as the cell value. Note that null
* values will not actually be null, but they can be checked for using Data.isNull().
*
* <p>Returns a ListenableFuture that holds the ImmutableTable on success.
*/
public ListenableFuture<ImmutableTable<Integer, TableFieldSchema, Object>>
queryToLocalTable(String querySql) {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())));
return transform(runJobToCompletion(job), this::getQueryResults, directExecutor());
}
/**
* Returns the result of calling queryToLocalTable, but synchronously to avoid spawning new
* background threads, which App Engine doesn't support.
*
* <p>Returns the results of the query in an ImmutableTable on success.
*
* @see <a href="https://cloud.google.com/appengine/docs/standard/java/runtime#Threads">
*     App Engine Runtime</a>
*/
public ImmutableTable<Integer, TableFieldSchema, Object> queryToLocalTableSync(String querySql) {
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(querySql)
.setDefaultDataset(getDataset())));
return getQueryResults(runJob(job));
}
/**
* Returns the query results for the given job as an ImmutableTable, row-keyed by row number
* (indexed from 1), column-keyed by the TableFieldSchema for that field, and with the value
* object as the cell value. Note that null values will not actually be null (since we're using
* ImmutableTable) but they can be checked for using Data.isNull().
*
* <p>This table is fully materialized in memory (not lazily loaded), so it should not be used
* with queries expected to return large results.
*/
private ImmutableTable<Integer, TableFieldSchema, Object> getQueryResults(Job job) {
try {
ImmutableTable.Builder<Integer, TableFieldSchema, Object> builder =
new ImmutableTable.Builder<>();
String pageToken = null;
int rowNumber = 1;
while (true) {
GetQueryResultsResponse queryResults = bigquery.jobs()
.getQueryResults(getProjectId(), job.getJobReference().getJobId())
.setPageToken(pageToken)
.execute();
// If the job isn't complete yet, retry; getQueryResults() waits for up to 10 seconds on
// each invocation so this will effectively poll for completion.
if (queryResults.getJobComplete()) {
List<TableFieldSchema> schemaFields = queryResults.getSchema().getFields();
for (TableRow row : queryResults.getRows()) {
Iterator<TableFieldSchema> fieldIterator = schemaFields.iterator();
Iterator<TableCell> cellIterator = row.getF().iterator();
while (fieldIterator.hasNext() && cellIterator.hasNext()) {
builder.put(rowNumber, fieldIterator.next(), cellIterator.next().getV());
}
rowNumber++;
}
pageToken = queryResults.getPageToken();
if (pageToken == null) {
break;
}
}
}
return builder.build();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
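// Illustrative only: consuming the materialized result of queryToLocalTable(). Data.isNull()
// (from com.google.api.client.util.Data) distinguishes SQL NULLs from real cell values.
//
//   ImmutableTable<Integer, TableFieldSchema, Object> result =
//       connection.queryToLocalTable("SELECT name, count FROM [stats.totals]").get();
//   for (Map.Entry<TableFieldSchema, Object> cell : result.row(1).entrySet()) {
//     if (!Data.isNull(cell.getValue())) {
//       System.out.println(cell.getKey().getName() + " = " + cell.getValue());
//     }
//   }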
/**
* Starts an asynchronous job to extract the specified source table and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
private ListenableFuture<String> extractTable(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
checkArgument(sourceTable.type == TableType.TABLE);
Job job = new Job()
.setConfiguration(new JobConfiguration()
.setExtract(new JobConfigurationExtract()
.setSourceTable(sourceTable.getTableReference())
.setDestinationFormat(destinationFormat.toString())
.setDestinationUris(ImmutableList.of(destinationUri))
.setPrintHeader(printHeader)));
return runJobToCompletion(job, destinationUri);
}
/**
* Starts an asynchronous job to extract the specified source table or view and output it to the
* given GCS filepath in the specified destination format, optionally printing headers.
* Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extract(
DestinationTable sourceTable,
String destinationUri,
DestinationFormat destinationFormat,
boolean printHeader) {
if (sourceTable.type == TableType.TABLE) {
return extractTable(sourceTable, destinationUri, destinationFormat, printHeader);
} else {
// We can't extract directly from a view, so instead extract from a query dumping that view.
return extractQuery(
SqlTemplate
.create("SELECT * FROM [%DATASET%.%TABLE%]")
.put("DATASET", sourceTable.getTableReference().getDatasetId())
.put("TABLE", sourceTable.getTableReference().getTableId())
.build(),
destinationUri,
destinationFormat,
printHeader);
}
}
/**
* Starts an asynchronous job to run the provided query, store the results in a temporary table,
* and then extract the contents of that table to the given GCS filepath in the specified
* destination format, optionally printing headers.
*
* <p>Returns a ListenableFuture that holds the destination GCS URI on success.
*/
public ListenableFuture<String> extractQuery(
String querySql,
final String destinationUri,
final DestinationFormat destinationFormat,
final boolean printHeader) {
// Note: although BigQuery queries save their results to an auto-generated anonymous table,
// we can't rely on that for running the extract job because it may not be fully replicated.
// Tracking bug for query-to-GCS support is b/13777340.
DestinationTable tempTable = buildTemporaryTable().build();
return transformAsync(
query(querySql, tempTable),
tempTable1 -> extractTable(tempTable1, destinationUri, destinationFormat, printHeader),
directExecutor());
}
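// Illustrative only: dumping ad-hoc query results to a CSV file in GCS with a header row.
// The query and destination URI are hypothetical.
//
//   ListenableFuture<String> uri = connection.extractQuery(
//       "SELECT tld, COUNT(*) FROM [reporting.domains] GROUP BY tld",
//       "gs://my-bucket/reports/domains.csv",
//       DestinationFormat.CSV,
//       true /* printHeader */);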
/** @see #runJob(Job, AbstractInputStreamContent) */
public Job runJob(Job job) {
return runJob(job, null);
}
/**
* Launches a job, waits for it to complete, and checks the completed job for errors.
*
* @throws BigqueryJobFailureException if the job failed or could not be launched
*/
public Job runJob(Job job, @Nullable AbstractInputStreamContent data) {
return checkJob(waitForJob(launchJob(job, data)));
}
/**
* Launch a job, but do not wait for it to complete.
*
* @throws BigqueryJobFailureException
*/
private Job launchJob(Job job, @Nullable AbstractInputStreamContent data) {
verify(job.getStatus() == null);
try {
return data != null
? bigquery.jobs().insert(getProjectId(), job, data).execute()
: bigquery.jobs().insert(getProjectId(), job).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
/**
* Synchronously waits for a job to complete that's already been launched.
*
* @throws BigqueryJobFailureException
*/
private Job waitForJob(Job job) {
verify(job.getStatus() != null);
while (!job.getStatus().getState().equals("DONE")) {
sleeper.sleepUninterruptibly(pollInterval);
JobReference ref = job.getJobReference();
try {
job = bigquery.jobs().get(ref.getProjectId(), ref.getJobId()).execute();
} catch (IOException e) {
throw BigqueryJobFailureException.create(e);
}
}
return job;
}
/**
* Checks completed job for errors.
*
* @throws BigqueryJobFailureException
*/
private static Job checkJob(Job job) {
verify(job.getStatus() != null);
JobStatus jobStatus = job.getStatus();
if (jobStatus.getErrorResult() != null) {
throw BigqueryJobFailureException.create(jobStatus);
} else {
logger.atInfo().log(summarizeCompletedJob(job));
if (jobStatus.getErrors() != null) {
for (ErrorProto error : jobStatus.getErrors()) {
logger.atWarning().log("%s: %s", error.getReason(), error.getMessage());
}
}
return job;
}
}
/** Returns a summarization of a completed job's statistics for logging. */
private static String summarizeCompletedJob(Job job) {
JobStatistics stats = job.getStatistics();
return String.format(
"Job took %,.3f seconds after a %,.3f second delay and processed %,d bytes (%s)",
(stats.getEndTime() - stats.getStartTime()) / 1000.0,
(stats.getStartTime() - stats.getCreationTime()) / 1000.0,
stats.getTotalBytesProcessed(),
toJobReferenceString(job.getJobReference()));
}
private <T> ListenableFuture<T> runJobToCompletion(Job job, T result) {
return runJobToCompletion(job, result, null);
}
/** Runs job and returns a future that yields {@code result} when {@code job} is completed. */
private <T> ListenableFuture<T> runJobToCompletion(
final Job job,
final T result,
@Nullable final AbstractInputStreamContent data) {
return service.submit(
() -> {
runJob(job, data);
return result;
});
}
private ListenableFuture<Job> runJobToCompletion(final Job job) {
return service.submit(() -> runJob(job, null));
}
/** Helper that returns true if a dataset with this name exists. */
public boolean checkDatasetExists(String datasetName) throws IOException {
try {
bigquery.datasets().get(getProjectId(), datasetName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails() != null && e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Helper that returns true if a table with this name and dataset name exists. */
public boolean checkTableExists(String datasetName, String tableName) throws IOException {
try {
bigquery.tables().get(getProjectId(), datasetName, tableName).execute();
return true;
} catch (GoogleJsonResponseException e) {
if (e.getDetails() != null && e.getDetails().getCode() == 404) {
return false;
}
throw e;
}
}
/** Returns the dataset name that this bigquery connection uses by default. */
public String getDatasetId() {
return datasetId;
}
/** Returns a dataset reference that can be used to avoid having to specify the dataset in SQL code. */
public DatasetReference getDataset() {
return new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId());
}
/** Returns a table reference with the projectId and datasetId filled out for you. */
public TableReference getTable(String tableName) {
return new TableReference()
.setProjectId(getProjectId())
.setDatasetId(getDatasetId())
.setTableId(tableName);
}
/**
* Helper that creates a dataset with this name if it doesn't already exist, and returns true
* if creation took place.
*/
public boolean createDatasetIfNeeded(String datasetName) throws IOException {
if (!checkDatasetExists(datasetName)) {
bigquery.datasets()
.insert(getProjectId(), new Dataset().setDatasetReference(new DatasetReference()
.setProjectId(getProjectId())
.setDatasetId(datasetName)))
.execute();
System.err.printf("Created dataset: %s:%s\n", getProjectId(), datasetName);
return true;
}
return false;
}
/** Create a table from a SQL query if it doesn't already exist. */
public TableReference ensureTable(TableReference table, String sqlQuery) {
try {
runJob(new Job()
.setConfiguration(new JobConfiguration()
.setQuery(new JobConfigurationQuery()
.setQuery(sqlQuery)
.setDefaultDataset(getDataset())
.setDestinationTable(table))));
} catch (BigqueryJobFailureException e) {
// Swallow "duplicate" errors, which just mean the table already exists.
if (!e.getReason().equals("duplicate")) {
throw e;
}
}
return table;
}
}

View file

@ -0,0 +1,124 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.api.client.googleapis.json.GoogleJsonError;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.services.bigquery.model.ErrorProto;
import com.google.api.services.bigquery.model.JobStatus;
import com.google.common.collect.Iterables;
import java.io.IOException;
import javax.annotation.Nullable;
/** Generic exception to throw if a Bigquery job fails. */
public final class BigqueryJobFailureException extends RuntimeException {
/** Delegates {@link IOException} errors, checking for {@link GoogleJsonResponseException}. */
public static BigqueryJobFailureException create(IOException cause) {
if (cause instanceof GoogleJsonResponseException) {
return create((GoogleJsonResponseException) cause);
} else {
return new BigqueryJobFailureException(cause.getMessage(), cause, null, null);
}
}
/** Create an error for JSON server response errors. */
public static BigqueryJobFailureException create(GoogleJsonResponseException cause) {
GoogleJsonError err = cause.getDetails();
if (err != null) {
return new BigqueryJobFailureException(err.getMessage(), null, null, err);
} else {
return new BigqueryJobFailureException(cause.getMessage(), cause, null, null);
}
}
/** Create an error from a failed job. */
public static BigqueryJobFailureException create(JobStatus jobStatus) {
checkArgument(jobStatus.getErrorResult() != null, "this job didn't fail!");
return new BigqueryJobFailureException(
describeError(jobStatus.getErrorResult()), null, jobStatus, null);
}
@Nullable
private final JobStatus jobStatus;
@Nullable
private final GoogleJsonError jsonError;
public BigqueryJobFailureException(
String message,
@Nullable Throwable cause,
@Nullable JobStatus jobStatus,
@Nullable GoogleJsonError jsonError) {
super(message, cause);
this.jobStatus = jobStatus;
this.jsonError = jsonError;
}
/**
* Returns a short error code describing why this job failed.
*
* <h3>Sample Reasons</h3>
*
* <ul>
* <li>{@code "duplicate"}: The table you're trying to create already exists.
* <li>{@code "invalidQuery"}: Query syntax error of some sort.
* <li>{@code "unknown"}: Non-Bigquery errors.
* </ul>
*
* @see <a href="https://cloud.google.com/bigquery/troubleshooting-errors">
* Troubleshooting Errors</a>
*/
public String getReason() {
if (jobStatus != null) {
return jobStatus.getErrorResult().getReason();
} else if (jsonError != null) {
return Iterables.getLast(jsonError.getErrors()).getReason();
} else {
return "unknown";
}
}
@Override
public String getMessage() {
StringBuilder result = new StringBuilder();
result.append(String.format("%s: %s", getClass().getSimpleName(), super.getMessage()));
try {
if (jobStatus != null) {
for (ErrorProto error : jobStatus.getErrors()) {
result.append("\n---------------------------------- BEGIN DEBUG INFO\n");
result.append(describeError(error));
result.append('\n');
result.append(error.getDebugInfo());
result.append("\n---------------------------------- END DEBUG INFO");
}
}
if (jsonError != null) {
String extraInfo = jsonError.toPrettyString();
result.append('\n');
result.append(extraInfo);
}
} catch (IOException e) {
result.append(e);
}
return result.toString();
}
private static String describeError(ErrorProto error) {
return String.format("%s: %s", error.getReason(), error.getMessage());
}
}
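// Illustrative only: callers typically branch on getReason() to tolerate benign failures, as
// CheckedBigquery does for "duplicate" errors when creating datasets and tables.
//
//   try {
//     connection.runJob(job);
//   } catch (BigqueryJobFailureException e) {
//     if (!e.getReason().equals("duplicate")) {
//       throw e;
//     }
//   }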

View file

@ -0,0 +1,46 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.bigquery;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.common.collect.ImmutableList;
import dagger.Module;
import dagger.Provides;
import dagger.multibindings.Multibinds;
import google.registry.config.CredentialModule.DefaultCredential;
import google.registry.config.RegistryConfig.Config;
import java.util.Map;
/** Dagger module for Google {@link Bigquery} connection objects. */
@Module
public abstract class BigqueryModule {
/** Provides a map of BigQuery table names to their schema field definitions. */
@Multibinds
abstract Map<String, ImmutableList<TableFieldSchema>> bigquerySchemas();
@Provides
static Bigquery provideBigquery(
@DefaultCredential GoogleCredential credential, @Config("projectId") String projectId) {
return new Bigquery.Builder(credential.getTransport(), credential.getJsonFactory(), credential)
.setApplicationName(projectId)
.build();
}
// No subclasses.
private BigqueryModule() {}
}
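// Illustrative only: a sibling module could contribute a table schema to the multibound map
// declared above, which CheckedBigquery consumes. The table and field names are hypothetical.
//
//   @Provides
//   @IntoMap
//   @StringKey("my_table")
//   static ImmutableList<TableFieldSchema> provideMyTableSchema() {
//     return ImmutableList.of(
//         new TableFieldSchema().setName("domainName").setType("STRING"));
//   }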

View file

@ -0,0 +1,168 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.bigquery;
import com.google.api.services.bigquery.model.JobReference;
import java.util.concurrent.TimeUnit;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.DateTimeFormatterBuilder;
import org.joda.time.format.DateTimeParser;
import org.joda.time.format.ISODateTimeFormat;
/** Utilities related to Bigquery. */
public class BigqueryUtils {
/** Bigquery modes for schema fields. */
public enum FieldMode {
NULLABLE,
REQUIRED,
REPEATED;
/** Returns the name of the field mode as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Bigquery schema field types. */
public enum FieldType {
STRING,
INTEGER,
FLOAT,
TIMESTAMP,
RECORD,
BOOLEAN;
/** Returns the name of the field type as it should appear in the Bigquery schema. */
public String schemaName() {
return name();
}
}
/** Source formats for Bigquery load jobs. */
public enum SourceFormat {
CSV,
NEWLINE_DELIMITED_JSON,
DATASTORE_BACKUP
}
/** Destination formats for Bigquery extract jobs. */
public enum DestinationFormat {
CSV,
NEWLINE_DELIMITED_JSON
}
/** Bigquery table types (i.e. regular table or view). */
public enum TableType {
TABLE,
VIEW
}
/**
* Bigquery write dispositions (i.e. what to do about writing to an existing table).
*
* @see <a href="https://developers.google.com/bigquery/docs/reference/v2/jobs">API docs</a>
*/
public enum WriteDisposition {
/** Only write to the table if there is no existing table or if it is empty. */
WRITE_EMPTY,
/** If the table already exists, overwrite it with the new data. */
WRITE_TRUNCATE,
/** If the table already exists, append the data to the table. */
WRITE_APPEND
}
/**
* A {@code DateTimeFormatter} that defines how to print DateTimes in a string format that
* BigQuery can interpret and how to parse the string formats that BigQuery emits into DateTimes.
*
* <p>The general format definition is "YYYY-MM-DD HH:MM:SS.SSS[ ZZ]", where the fractional
* seconds portion can have 0-6 decimal places (although we restrict it to 0-3 here, since Joda
* DateTime only supports up to millisecond precision), and the zone, if not specified, defaults
* to UTC.
*
* <p>Although we expect a zone specification of "UTC" when parsing, we don't emit it when
* printing because in some cases BigQuery does not allow any time zone specification (instead it
* assumes UTC for whatever input you provide) for input timestamp strings (see b/16380363).
*
* @see <a href="https://cloud.google.com/bigquery/data-types#timestamp-type">
* BigQuery Data Types - TIMESTAMP</a>
*/
public static final DateTimeFormatter BIGQUERY_TIMESTAMP_FORMAT = new DateTimeFormatterBuilder()
.append(ISODateTimeFormat.date())
.appendLiteral(' ')
.append(
// For printing, always print out the milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getPrinter(),
// For parsing, we need a series of parsers to correctly handle the milliseconds.
new DateTimeParser[] {
// Try to parse the time with milliseconds first, which requires at least one
// fractional second digit, and if that fails try to parse without milliseconds.
ISODateTimeFormat.hourMinuteSecondMillis().getParser(),
ISODateTimeFormat.hourMinuteSecond().getParser()})
// Print UTC as the empty string since BigQuery's TIMESTAMP() function does not accept any
// time zone specification, but require "UTC" on parsing. Since we force this formatter to
// always use UTC below, the other arguments do not matter. If b/16380363 ever gets resolved
// this could be simplified to appendLiteral(" UTC").
.appendTimeZoneOffset("", " UTC", false, 1, 1)
.toFormatter()
.withZoneUTC();
/**
* Returns the human-readable string version of the given DateTime, suitable for conversion
* within BigQuery from a string literal into a BigQuery timestamp type.
*/
public static String toBigqueryTimestampString(DateTime dateTime) {
return BIGQUERY_TIMESTAMP_FORMAT.print(dateTime);
}
/** Returns the DateTime for a given human-readable string-formatted BigQuery timestamp. */
public static DateTime fromBigqueryTimestampString(String timestampString) {
return BIGQUERY_TIMESTAMP_FORMAT.parseDateTime(timestampString);
}
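// Illustrative round trip of the format above (all times UTC):
//
//   String s = toBigqueryTimestampString(DateTime.parse("2017-05-01T12:30:45.500Z"));
//   // s is "2017-05-01 12:30:45.500" -- no zone suffix is printed.
//   DateTime dt = fromBigqueryTimestampString("2017-05-01 12:30:45.500 UTC");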
/**
* Converts a time (in TimeUnits since the epoch) into a numeric string that BigQuery understands
* as a timestamp: the decimal number of seconds since the epoch, precise up to microseconds.
*
* @see <a href="https://developers.google.com/bigquery/timestamp">Data Types</a>
*/
public static String toBigqueryTimestamp(long timestamp, TimeUnit unit) {
long seconds = unit.toSeconds(timestamp);
long fractionalSeconds = unit.toMicros(timestamp) % 1000000;
return String.format("%d.%06d", seconds, fractionalSeconds);
}
/**
* Converts a {@link DateTime} into a numeric string that BigQuery understands as a timestamp:
* the decimal number of seconds since the epoch, precise up to microseconds.
*
* <p>Note that since {@code DateTime} only stores milliseconds, the last 3 digits will be zero.
*
* @see <a href="https://developers.google.com/bigquery/timestamp">Data Types</a>
*/
public static String toBigqueryTimestamp(DateTime dateTime) {
return toBigqueryTimestamp(dateTime.getMillis(), TimeUnit.MILLISECONDS);
}
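// Worked example: 1500000000123 milliseconds since the epoch is 1500000000 whole seconds
// plus 123000 microseconds, so toBigqueryTimestamp(1500000000123L, TimeUnit.MILLISECONDS)
// returns "1500000000.123000" (the last three digits are zero, as noted above).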
/**
* Returns the canonical string format for a JobReference object (the project ID and then job ID,
* delimited by a single colon) since JobReference.toString() is not customized to return it.
*/
public static String toJobReferenceString(JobReference jobRef) {
return jobRef.getProjectId() + ":" + jobRef.getJobId();
}
}

View file

@ -0,0 +1,128 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.bigquery;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Sets.newConcurrentHashSet;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.FluentLogger;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
/** Wrapper of {@link Bigquery} with validation helpers. */
public class CheckedBigquery {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
// Cross-request caches to avoid unnecessary RPCs.
private static Set<String> knownExistingDatasets = newConcurrentHashSet();
private static Set<String> knownExistingTables = newConcurrentHashSet();
@Inject Map<String, ImmutableList<TableFieldSchema>> bigquerySchemas;
@Inject Bigquery bigquery;
@Inject
CheckedBigquery() {}
/**
* Returns a new connection to Bigquery, first ensuring that the given dataset exists in the
* project with the given id, creating it if required.
*/
public Bigquery ensureDataSetExists(String projectId, String datasetId) throws IOException {
// Note: it's safe for multiple threads to call this as the dataset will only be created once.
if (!knownExistingDatasets.contains(datasetId)) {
ensureDataset(bigquery, projectId, datasetId);
knownExistingDatasets.add(datasetId);
}
return bigquery;
}
/**
* Returns a new connection to Bigquery, first ensuring that the given dataset and table exist in
* project with the given id, creating them if required.
*/
public Bigquery ensureDataSetAndTableExist(String projectId, String datasetId, String tableId)
throws IOException {
ensureDataSetExists(projectId, datasetId);
checkArgument(bigquerySchemas.containsKey(tableId), "Unknown table ID: %s", tableId);
if (!knownExistingTables.contains(tableId)) {
ensureTable(
bigquery,
new TableReference()
.setDatasetId(datasetId)
.setProjectId(projectId)
.setTableId(tableId),
bigquerySchemas.get(tableId));
knownExistingTables.add(tableId);
}
return bigquery;
}
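// Illustrative only: a caller gets back a ready-to-use client once the dataset and a known
// table are guaranteed to exist. The project, dataset, and table IDs are hypothetical.
//
//   Bigquery client =
//       checkedBigquery.ensureDataSetAndTableExist("my-project", "reporting", "my_table");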
/**
* Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper
* to check for dataset existence than it is to try to create it and check for exceptions.
*/
// Note that these are not static so they can be mocked for testing.
private void ensureDataset(Bigquery bigquery, String projectId, String datasetId)
throws IOException {
try {
bigquery.datasets()
.insert(projectId,
new Dataset().setDatasetReference(
new DatasetReference()
.setProjectId(projectId)
.setDatasetId(datasetId)))
.execute();
} catch (IOException e) {
// Swallow errors about a duplicate dataset, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
/** Ensures the table exists in Bigquery. */
private void ensureTable(Bigquery bigquery, TableReference table, List<TableFieldSchema> schema)
throws IOException {
try {
bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table()
.setSchema(new TableSchema().setFields(schema))
.setTableReference(table))
.execute();
logger.atInfo().log(
"Created BigQuery table %s:%s.%s",
table.getProjectId(), table.getDatasetId(), table.getTableId());
} catch (IOException e) {
// Swallow errors about a table that exists, and throw any other ones.
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
throw e;
}
}
}
}

View file

@ -0,0 +1,16 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package google.registry.bigquery;

View file

@ -0,0 +1,5 @@
package(
default_visibility = ["//java/google/registry:registry_project"],
)
licenses(["notice"]) # Apache 2.0

View file

@ -0,0 +1,60 @@
# Copyright 2017 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common routines for Nomulus build rules."""
ZIPPER = "@bazel_tools//tools/zip:zipper"
def long_path(ctx, file_):
"""Constructs canonical runfile path relative to TEST_SRCDIR.
Args:
ctx: A Skylark rule context.
file_: A File object that should appear in the runfiles for the test.
Returns:
A string path relative to TEST_SRCDIR suitable for use in tests and
testing infrastructure.
"""
if file_.short_path.startswith("../"):
return file_.short_path[3:]
if file_.owner and file_.owner.workspace_root:
return file_.owner.workspace_root + "/" + file_.short_path
return ctx.workspace_name + "/" + file_.short_path
def collect_runfiles(targets):
"""Aggregates runfiles from targets.
Args:
targets: A list of Bazel targets.
Returns:
A list of Bazel files.
"""
data = depset()
for target in targets:
if hasattr(target, "runfiles"):
data += target.runfiles.files
continue
if hasattr(target, "data_runfiles"):
data += target.data_runfiles.files
if hasattr(target, "default_runfiles"):
data += target.default_runfiles.files
return data
def _get_runfiles(target, attribute):
runfiles = getattr(target, attribute, None)
if runfiles:
return runfiles.files
return []

View file

@ -0,0 +1,53 @@
# Copyright 2017 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build macro for creating App Engine EAR archives for Nomulus."""
load("//java/google/registry/builddefs:defs.bzl", "ZIPPER")
def registry_ear_file(name, out, configs, wars, **kwargs):
"""Creates an EAR archive by combining WAR archives."""
cmd = [
"set -e",
"repo=$$(pwd)",
"zipper=$$repo/$(location %s)" % ZIPPER,
"tmp=$$(mktemp -d $${TMPDIR:-/tmp}/registry_ear_file.XXXXXXXXXX)",
"cd $${tmp}",
]
for target, dest in configs.items():
cmd += [
"mkdir -p $${tmp}/$$(dirname %s)" % dest,
"ln -s $${repo}/$(location %s) $${tmp}/%s" % (target, dest),
]
for target, dest in wars.items():
cmd += [
"mkdir " + dest,
"cd " + dest,
"$${zipper} x $${repo}/$(location %s)" % target,
"cd ..",
]
cmd += [
"$${zipper} cC $${repo}/$@ $$(find . | sed 1d | cut -c 3- | LC_ALL=C sort)",
"cd $${repo}",
"rm -rf $${tmp}",
]
native.genrule(
name = name,
srcs = configs.keys() + wars.keys(),
outs = [out],
cmd = "\n".join(cmd),
tools = [ZIPPER],
message = "Generating EAR archive",
**kwargs
)
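# Illustrative only: combining deployment descriptors and per-service WAR archives into one
# EAR. The labels and destination names below are hypothetical.
#
#   registry_ear_file(
#       name = "registry_ear",
#       out = "registry.ear",
#       configs = {":appengine-application.xml": "META-INF/appengine-application.xml"},
#       wars = {"//java/google/registry:registry_default_war": "default"},
#   )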

View file

@ -0,0 +1,256 @@
# Copyright 2017 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zip file creator that allows arbitrary path renaming.
This rule takes two main inputs: a bunch of filesets and a dictionary of
hard-coded source to dest mappings. It then applies those mappings to the input
file paths, to create a zip file with the same name as the rule.
The following preconditions must be met:
- Sources and destinations can't begin or end with slash.
- Every file must be matched by a mapping.
- Every mapping must match something.
The source can either be an exact match or a prefix.
- If a match is exact, the destination replaces the entire path. If the
destination path is empty, then the path remains the same.
- If the match is a prefix, then the destination replaces the source prefix in
the path. If the destination is empty, then the source prefix is removed.
- If source is an empty string, it matches everything. In this case,
destination becomes the path prefix.
Prefixes are matched with component granularity, not characters. Mappings with
more components take precedence. Mappings with equal components are sorted
asciibetically.
Mappings apply to the "long path" of a file, i.e. relative to TEST_SRCDIR,
e.g. workspace_name/pkg/file. Long paths do not take into consideration
bazel-foo/ output directories.
The deps attribute allows zip_file() rules to depend on other zip_file() rules.
In such cases, the contents of directly dependent zip files are unzipped and
then re-zipped. Mappings specified by the current rule do not apply to the
files extracted from dependent zips. However those files can be overridden.
The simplest example of this rule, which simply zips up short paths, is as
follows:
# //my/package/BUILD
zip_file(
name = "doodle",
srcs = ["hello.txt"],
mappings = {"": ""},
)
The rule above would create a zip file name //my/package/doodle.zip which would
contain a single file named "my/package/hello.txt".
If we wanted to strip the package path, we could do the following:
# //my/package/BUILD
zip_file(
name = "doodle",
srcs = ["hello.txt"],
mappings = {"my/package": ""},
)
In this case, doodle.zip would contain a single file: "hello.txt".
If we wanted to rename hello.txt, we could do the following:
# //my/package/BUILD
zip_file(
name = "doodle",
srcs = ["hello.txt"],
mappings = {"my/package/hello.txt": "my/package/world.txt"},
)
A zip file can be assembled across many rules. For example:
# //webapp/html/BUILD
zip_file(
name = "assets",
srcs = glob(["*.html"]),
mappings = {"webapp/html": ""},
)
# //webapp/js/BUILD
zip_file(
name = "assets",
srcs = glob(["*.js"]),
mappings = {"webapp/js": "assets/js"},
)
# //webapp/BUILD
zip_file(
name = "war",
deps = [
"//webapp/html:assets",
"//webapp/js:assets",
],
mappings = {"webapp/html": ""},
)
You can exclude files with the "exclude" attribute:
# //webapp/BUILD
zip_file(
name = "war_without_tears",
deps = ["war"],
exclude = ["assets/js/tears.js"],
)
Note that "exclude" excludes based on the mapped path relative to the root of
the zipfile. If the file doesn't exist, you'll get an error.
"""
load(
"//java/google/registry/builddefs:defs.bzl",
"ZIPPER",
"collect_runfiles",
"long_path",
)
def _zip_file(ctx):
"""Implementation of zip_file() rule."""
for s, d in ctx.attr.mappings.items():
if (s.startswith("/") or s.endswith("/") or
d.startswith("/") or d.endswith("/")):
fail("mappings should not begin or end with slash")
srcs = depset()
srcs += ctx.files.srcs
srcs += ctx.files.data
srcs += collect_runfiles(ctx.attr.data)
mapped = _map_sources(ctx, srcs, ctx.attr.mappings)
cmd = [
"#!/bin/sh",
"set -e",
'repo="$(pwd)"',
'zipper="${repo}/%s"' % ctx.file._zipper.path,
'archive="${repo}/%s"' % ctx.outputs.out.path,
'tmp="$(mktemp -d "${TMPDIR:-/tmp}/zip_file.XXXXXXXXXX")"',
'cd "${tmp}"',
]
cmd += [
'"${zipper}" x "${repo}/%s"' % dep.zip_file.path
for dep in ctx.attr.deps
]
cmd += ["rm %s" % filename for filename in ctx.attr.exclude]
cmd += [
'mkdir -p "${tmp}/%s"' % zip_path
for zip_path in depset(
[
zip_path[:zip_path.rindex("/")]
for _, zip_path in mapped
if "/" in zip_path
],
).to_list()
]
cmd += [
'ln -sf "${repo}/%s" "${tmp}/%s"' % (path, zip_path)
for path, zip_path in mapped
]
cmd += [
("find . | sed 1d | cut -c 3- | LC_ALL=C sort" +
' | xargs "${zipper}" cC "${archive}"'),
'cd "${repo}"',
'rm -rf "${tmp}"',
]
if hasattr(ctx, "bin_dir"):
script = ctx.new_file(ctx.bin_dir, "%s.sh" % ctx.label.name)
else:
# TODO(kchodorow): remove this once Bazel 4.0+ is required.
script = ctx.new_file(ctx.configuration.bin_dir, "%s.sh" % ctx.label.name)
ctx.actions.write(output = script, content = "\n".join(cmd), is_executable = True)
inputs = [ctx.file._zipper]
inputs += [dep.zip_file for dep in ctx.attr.deps]
inputs += srcs.to_list()
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.out],
executable = script,
mnemonic = "zip",
progress_message = "Creating zip with %d inputs %s" % (
len(inputs),
ctx.label,
),
)
return struct(files = depset([ctx.outputs.out]), zip_file = ctx.outputs.out)
def _map_sources(ctx, srcs, mappings):
"""Calculates paths in zip file for srcs."""
# order mappings with more path components first
mappings = sorted([
(-len(source.split("/")), source, dest)
for source, dest in mappings.items()
])
# get rid of the integer part of tuple used for sorting
mappings = [(source, dest) for _, source, dest in mappings]
mappings_indexes = range(len(mappings))
used = {i: False for i in mappings_indexes}
mapped = []
for file_ in srcs:
run_path = long_path(ctx, file_)
zip_path = None
for i in mappings_indexes:
source = mappings[i][0]
dest = mappings[i][1]
if not source:
if dest:
zip_path = dest + "/" + run_path
else:
zip_path = run_path
elif source == run_path:
if dest:
zip_path = dest
else:
zip_path = run_path
elif run_path.startswith(source + "/"):
if dest:
zip_path = dest + run_path[len(source):]
else:
zip_path = run_path[len(source) + 1:]
else:
continue
used[i] = True
break
if not zip_path:
fail("no mapping matched: " + run_path)
mapped += [(file_.path, zip_path)]
for i in mappings_indexes:
if not used[i]:
fail('superfluous mapping: "%s" -> "%s"' % mappings[i])
return mapped
zip_file = rule(
implementation = _zip_file,
output_to_genfiles = True,
attrs = {
"out": attr.output(mandatory = True),
"srcs": attr.label_list(allow_files = True),
"data": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = ["zip_file"]),
"exclude": attr.string_list(),
"mappings": attr.string_dict(),
"_zipper": attr.label(default = Label(ZIPPER), allow_single_file = True),
},
)

View file

@ -0,0 +1,28 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "config",
srcs = glob(["*.java"]),
resources = glob(["files/*.yaml"]),
deps = [
"//java/google/registry/keyring/api",
"//java/google/registry/util",
"@com_google_api_client",
"@com_google_api_client_appengine",
"@com_google_appengine_api_1_0_sdk",
"@com_google_auto_value",
"@com_google_code_findbugs_jsr305",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@com_google_http_client",
"@javax_inject",
"@joda_time",
"@org_joda_money",
],
)

View file

@ -0,0 +1,37 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.config;
import java.net.MalformedURLException;
import java.net.URL;
/** Helper methods for configuration classes. */
public final class ConfigUtils {
/**
* Creates a URL instance.
*
* @throws RuntimeException to rethrow {@link MalformedURLException}
*/
public static URL makeUrl(String url) {
try {
return new URL(url);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private ConfigUtils() {}
}

View file

@ -0,0 +1,162 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.config;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
import com.google.api.client.googleapis.util.Utils;
import com.google.common.collect.ImmutableList;
import dagger.Module;
import dagger.Provides;
import google.registry.config.RegistryConfig.Config;
import google.registry.keyring.api.KeyModule.Key;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.security.GeneralSecurityException;
import javax.inject.Qualifier;
import javax.inject.Singleton;
/**
* Dagger module that provides all {@link GoogleCredential GoogleCredentials} used in the
* application.
*/
@Module
public abstract class CredentialModule {
/**
* Provides the default {@link GoogleCredential} from the Google Cloud runtime.
*
* <p>The credential returned depends on the runtime environment:
*
* <ul>
* <li>On AppEngine, returns the service account credential for
* PROJECT_ID@appspot.gserviceaccount.com
* <li>On Compute Engine, returns the service account credential for
* PROJECT_NUMBER-compute@developer.gserviceaccount.com
* <li>On end user host, this returns the credential downloaded by gcloud. Please refer to <a
* href="https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login">Cloud
* SDK documentation</a> for details.
* </ul>
*/
@DefaultCredential
@Provides
@Singleton
public static GoogleCredential provideDefaultCredential(
@Config("defaultCredentialOauthScopes") ImmutableList<String> requiredScopes) {
GoogleCredential credential;
try {
credential = GoogleCredential.getApplicationDefault();
} catch (IOException e) {
throw new RuntimeException(e);
}
if (credential.createScopedRequired()) {
return credential.createScoped(requiredScopes);
}
return credential;
}
/**
* Provides a {@link GoogleCredential} from the service account's JSON key file.
*
* <p>On App Engine, a thread created using Java's built-in threading API needs this credential
* when it calls the App Engine API. The Google Sheets API also needs this credential.
*/
@JsonCredential
@Provides
@Singleton
public static GoogleCredential provideJsonCredential(
@Config("defaultCredentialOauthScopes") ImmutableList<String> requiredScopes,
@Key("jsonCredential") String jsonCredential) {
GoogleCredential credential;
try {
credential =
GoogleCredential.fromStream(
new ByteArrayInputStream(jsonCredential.getBytes(UTF_8)),
// We cannot use UrlFetchTransport as that uses App Engine API.
GoogleNetHttpTransport.newTrustedTransport(),
Utils.getDefaultJsonFactory());
} catch (IOException | GeneralSecurityException e) {
throw new RuntimeException(e);
}
if (credential.createScopedRequired()) {
credential = credential.createScoped(requiredScopes);
}
return credential;
}
/**
* Provides a {@link GoogleCredential} with delegated admin access for a G Suite domain.
*
* <p>The G Suite domain must grant delegated admin access to the registry service account with
* all scopes in {@code requiredScopes}, including ones not related to G Suite.
*/
@DelegatedCredential
@Provides
@Singleton
public static GoogleCredential provideDelegatedCredential(
@Config("delegatedCredentialOauthScopes") ImmutableList<String> requiredScopes,
@JsonCredential GoogleCredential googleCredential,
@Config("gSuiteAdminAccountEmailAddress") String gSuiteAdminAccountEmailAddress) {
return new GoogleCredential.Builder()
.setTransport(Utils.getDefaultTransport())
.setJsonFactory(Utils.getDefaultJsonFactory())
.setServiceAccountId(googleCredential.getServiceAccountId())
.setServiceAccountPrivateKey(googleCredential.getServiceAccountPrivateKey())
.setServiceAccountScopes(requiredScopes)
.setServiceAccountUser(gSuiteAdminAccountEmailAddress)
.build();
}
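// Illustrative only: consumers inject whichever qualified credential they need, e.g.
//
//   @Inject @DelegatedCredential GoogleCredential delegatedCredential;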
/** Dagger qualifier for the Application Default Credential. */
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface DefaultCredential {}
/**
* Dagger qualifier for a credential from a service account's JSON key, to be used in non-request
* threads.
*/
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface JsonCredential {}
/**
* Dagger qualifier for a credential with delegated admin access for a dasher domain (for G
* Suite).
*/
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface DelegatedCredential {}
/** Dagger qualifier for the local credential used in the nomulus tool. */
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface LocalCredential {}
/** Dagger qualifier for the JSON string used to create the local credential. */
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface LocalCredentialJson {}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,198 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.config;
import java.util.List;
/** The POJO that YAML config files are deserialized into. */
public class RegistryConfigSettings {
public AppEngine appEngine;
public GSuite gSuite;
public OAuth oAuth;
public CredentialOAuth credentialOAuth;
public RegistryPolicy registryPolicy;
public Datastore datastore;
public CloudDns cloudDns;
public Caching caching;
public IcannReporting icannReporting;
public Billing billing;
public Rde rde;
public RegistrarConsole registrarConsole;
public Monitoring monitoring;
public Misc misc;
public Beam beam;
public Keyring keyring;
public RegistryTool registryTool;
/** Configuration options that apply to the entire App Engine project. */
public static class AppEngine {
public String projectId;
public boolean isLocal;
public String defaultServiceUrl;
public String backendServiceUrl;
public String toolsServiceUrl;
public String pubapiServiceUrl;
}
/** Configuration options for OAuth settings for authenticating users. */
public static class OAuth {
public List<String> availableOauthScopes;
public List<String> requiredOauthScopes;
public List<String> allowedOauthClientIds;
}
/** Configuration options for accessing Google APIs. */
public static class CredentialOAuth {
public List<String> defaultCredentialOauthScopes;
public List<String> delegatedCredentialOauthScopes;
public List<String> localCredentialOauthScopes;
}
/** Configuration options for the G Suite account used by Nomulus. */
public static class GSuite {
public String domainName;
public String outgoingEmailAddress;
public String outgoingEmailDisplayName;
public String adminAccountEmailAddress;
public String supportGroupEmailAddress;
}
/** Configuration options for registry policy. */
public static class RegistryPolicy {
public String contactAndHostRoidSuffix;
public String productName;
public String customLogicFactoryClass;
public String whoisCommandFactoryClass;
public String allocationTokenCustomLogicClass;
public String dnsCountQueryCoordinatorClass;
public int contactAutomaticTransferDays;
public String greetingServerId;
public List<String> registrarChangesNotificationEmailAddresses;
public String defaultRegistrarWhoisServer;
public String tmchCaMode;
public String tmchCrlUrl;
public String tmchMarksDbUrl;
public String checkApiServletClientId;
public String registryAdminClientId;
public String premiumTermsExportDisclaimer;
public String reservedTermsExportDisclaimer;
public String whoisRedactedEmailText;
public String whoisDisclaimer;
public String rdapTos;
public String rdapTosStaticUrl;
public String registryName;
public List<String> spec11WebResources;
public boolean requireSslCertificates;
}
/** Configuration for Cloud Datastore. */
public static class Datastore {
public int commitLogBucketsNum;
public int eppResourceIndexBucketsNum;
public int baseOfyRetryMillis;
}
/** Configuration for Apache Beam (Cloud Dataflow). */
public static class Beam {
public String defaultJobZone;
}
/** Configuration for Cloud DNS. */
public static class CloudDns {
public String rootUrl;
public String servicePath;
}
/** Configuration for caching. */
public static class Caching {
public int singletonCacheRefreshSeconds;
public int domainLabelCachingSeconds;
public int singletonCachePersistSeconds;
public int staticPremiumListMaxCachedEntries;
public boolean eppResourceCachingEnabled;
public int eppResourceCachingSeconds;
public int eppResourceMaxCachedEntries;
}
/** Configuration for ICANN monthly reporting. */
public static class IcannReporting {
public String icannTransactionsReportingUploadUrl;
public String icannActivityReportingUploadUrl;
}
/** Configuration for monthly invoices. */
public static class Billing {
public List<String> invoiceEmailRecipients;
public String invoiceFilePrefix;
}
/** Configuration for Registry Data Escrow (RDE). */
public static class Rde {
public String reportUrlPrefix;
public String uploadUrl;
public String sshIdentityEmailAddress;
}
/** Configuration for the web-based registrar console. */
public static class RegistrarConsole {
public String logoFilename;
public String supportPhoneNumber;
public String supportEmailAddress;
public String announcementsEmailAddress;
public String integrationEmailAddress;
public String technicalDocsUrl;
public AnalyticsConfig analyticsConfig;
}
/** Configuration for analytics services installed in the registrar console. */
public static class AnalyticsConfig {
public String googleAnalyticsId;
}
/** Configuration for monitoring. */
public static class Monitoring {
public int stackdriverMaxQps;
public int stackdriverMaxPointsPerRequest;
public int writeIntervalSeconds;
}
/** Miscellaneous configuration that doesn't quite fit in anywhere else. */
public static class Misc {
public String sheetExportId;
public String alertRecipientEmailAddress;
public String spec11ReplyToEmailAddress;
public int asyncDeleteDelaySeconds;
public int transientFailureRetries;
}
/** Configuration for keyrings (used to store secrets outside of source). */
public static class Keyring {
public String activeKeyring;
public Kms kms;
}
/** Configuration for Cloud KMS. */
public static class Kms {
public String keyringName;
public String projectId;
}
/** Configuration options for the registry tool. */
public static class RegistryTool {
public String clientId;
public String clientSecret;
}
}
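
All fields here are public and the nested classes are plain value holders, so a YAML mapper can bind a config file onto this class directly. A minimal sketch of that binding, assuming SnakeYAML is on the classpath (the real loader also merges the default and per-environment files, which is omitted here):

import org.yaml.snakeyaml.Yaml;

class ConfigLoadSketch {
  // SnakeYAML binds top-level YAML keys (appEngine, gSuite, ...) onto the
  // same-named public fields of RegistryConfigSettings.
  static RegistryConfigSettings parse(String yamlContents) {
    return new Yaml().loadAs(yamlContents, RegistryConfigSettings.class);
  }
}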

View file

@ -0,0 +1,71 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.config;
import com.google.common.base.Ascii;
/** Registry environments. */
public enum RegistryEnvironment {
/** Production environment. */
PRODUCTION,
/** Development environment. */
ALPHA,
/** Load/Backup/Restore Testing environment. */
CRASH,
/** Local machine environment. */
LOCAL,
/** Quality Assurance environment. */
QA,
/** Sandbox environment. */
SANDBOX,
/**
* Unit testing environment.
*
* <p>This is the default enum value. This is because it's non-trivial to configure the system
* property that specifies the environment in our unit tests.
*
* <p>Do not use this environment outside of unit tests.
*/
UNITTEST;
/** Sets this enum as the name of the registry environment. */
public RegistryEnvironment setup() {
return setup(SystemPropertySetter.PRODUCTION_IMPL);
}
/**
* Sets this enum as the name of the registry environment using specified {@link
* SystemPropertySetter}.
*/
public RegistryEnvironment setup(SystemPropertySetter systemPropertySetter) {
systemPropertySetter.setProperty(PROPERTY, name());
return this;
}
/** Returns environment configured by system property {@value #PROPERTY}. */
public static RegistryEnvironment get() {
return valueOf(Ascii.toUpperCase(System.getProperty(PROPERTY, UNITTEST.name())));
}
/** System property for configuring which environment we should use. */
private static final String PROPERTY = "google.registry.environment";
}
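
A brief usage sketch (illustrative, not part of this file): the environment can be selected on the JVM command line with -Dgoogle.registry.environment=sandbox (get() upper-cases the value before the enum lookup), or programmatically:

// Select the sandbox environment at startup, then read it back.
RegistryEnvironment.SANDBOX.setup();
RegistryEnvironment current = RegistryEnvironment.get(); // returns SANDBOX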

View file

@ -0,0 +1,47 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.config;
import javax.annotation.Nullable;
/**
* Wrapper interface around {@link System#setProperty(String, String)} and {@link
* System#clearProperty(String)}. Tests that modify system properties may provide custom
 * implementations that keep track of changes and restore original property values on test
* completion.
*/
public interface SystemPropertySetter {
/**
* Updates the system property specified by {@code key}. If {@code value} is not null, {@link
* System#setProperty(String, String)} is invoked; otherwise {@link System#clearProperty(String)}
* is invoked.
*/
SystemPropertySetter setProperty(String key, @Nullable String value);
/** Production implementation of {@link SystemPropertySetter}. */
SystemPropertySetter PRODUCTION_IMPL =
new SystemPropertySetter() {
@Override
public SystemPropertySetter setProperty(String key, @Nullable String value) {
if (value == null) {
System.clearProperty(key);
} else {
System.setProperty(key, value);
}
return this;
}
};
}
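
As the interface javadoc suggests, a test can supply an implementation that records original values and restores them afterwards. A minimal sketch of such a test double (the class name and AutoCloseable wiring are illustrative, not part of this commit):

import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;

final class RestoringSystemPropertySetter implements SystemPropertySetter, AutoCloseable {
  private final Map<String, String> originals = new HashMap<>();

  @Override
  public SystemPropertySetter setProperty(String key, @Nullable String value) {
    // Remember the first value seen for each key (null means it was unset).
    if (!originals.containsKey(key)) {
      originals.put(key, System.getProperty(key));
    }
    return PRODUCTION_IMPL.setProperty(key, value);
  }

  @Override
  public void close() {
    // Restore (or clear) every property this setter touched.
    originals.forEach((key, value) -> PRODUCTION_IMPL.setProperty(key, value));
  }
}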

View file

@ -0,0 +1,396 @@
# This is the default configuration file for Nomulus. Do not make changes to it
# unless you are writing new features that require you to. To customize an
# individual deployment or environment, create a nomulus-config.yaml file in the
# WEB-INF/ directory overriding only the values you wish to change. You may need
# to override some of these values to configure and enable some services used in
# production environments.
appEngine:
# Globally unique App Engine project ID
projectId: registry-project-id
# Whether to use local/test credentials when connecting to the servers.
isLocal: true
# URLs of the services for the project.
defaultServiceUrl: https://localhost
backendServiceUrl: https://localhost
toolsServiceUrl: https://localhost
pubapiServiceUrl: https://localhost
gSuite:
# Publicly accessible domain name of the running G Suite instance.
domainName: domain-registry.example
# Display name and email address used on outgoing emails through G Suite.
# The email address must be valid and have permission in the GAE app to send
# emails. For more info see:
# https://cloud.google.com/appengine/docs/standard/java/mail/#who_can_send_mail
outgoingEmailDisplayName: Example Registry
outgoingEmailAddress: noreply@project-id.appspotmail.com
# Email address of the admin account on the G Suite app. This is used for
# logging in to perform administrative actions, not sending emails.
adminAccountEmailAddress: admin@example.com
# Group containing the emails of the support accounts. These accounts will be
# given "ADMIN" role on the registrar console.
supportGroupEmailAddress: support@example.com
registryPolicy:
# Repository identifier (ROID) suffix for contacts and hosts.
contactAndHostRoidSuffix: ROID
# Product name of the registry. Used throughout the registrar console.
productName: Nomulus
# Custom logic factory fully-qualified class name.
# See flows/custom/CustomLogicFactory.java
customLogicFactoryClass: google.registry.flows.custom.CustomLogicFactory
# WHOIS command factory fully-qualified class name.
# See whois/WhoisCommandFactory.java
whoisCommandFactoryClass: google.registry.whois.WhoisCommandFactory
# Custom logic class for handling allocation tokens.
# See flows/domain/token/AllocationTokenCustomLogic.java
allocationTokenCustomLogicClass: google.registry.flows.domain.token.AllocationTokenCustomLogic
# Custom logic class for handling DNS query count reporting for ICANN.
# See reporting/icann/DnsCountQueryCoordinator.java
dnsCountQueryCoordinatorClass: google.registry.reporting.icann.BasicDnsCountQueryCoordinator
# Length of time after which contact transfers automatically conclude.
contactAutomaticTransferDays: 5
# Server ID used in the 'svID' element of an EPP 'greeting'.
greetingServerId: Nomulus Registry
# List of email addresses that notifications of registrar and/or registrar
# contact updates should be sent to, or empty list for no notifications.
registrarChangesNotificationEmailAddresses: []
# Default WHOIS server used when not specified on a registrar.
defaultRegistrarWhoisServer: whois.domain-registry.example
# Mode TMCH should run in (PRODUCTION for production environments, PILOT for
# all others including sandbox).
tmchCaMode: PILOT
# URL for the ICANN TMCH Certificate Revocation List.
tmchCrlUrl: http://crl.icann.org/tmch_pilot.crl
# URL for the MarksDB registry interface.
tmchMarksDbUrl: https://test-ry.marksdb.org
# Registry's operations registrar, used for front-end availability/premium
# domain checks.
checkApiServletClientId: TheRegistrar
# The registry admin's registrar. Admins are granted permission to log in
# using this registrar automatically if they are not associated with any
# registrar.
registryAdminClientId: TheRegistrar
# Disclaimer at the top of the exported premium terms list.
premiumTermsExportDisclaimer: |
This list contains domains for the TLD offered at a premium price. This
list is subject to change. The most up-to-date source is always the
registry itself, by sending domain check EPP commands.
# Disclaimer at the top of the exported reserved terms list.
reservedTermsExportDisclaimer: |
This list contains reserved terms for the TLD. Other terms may be reserved
but not included in this list, including terms the registry chooses not
to publish. This list is subject to change. The most up-to-date source
is always the registry itself, by sending domain check EPP commands.
# Redaction text for email address in WHOIS
whoisRedactedEmailText: |
Please query the WHOIS server of the owning registrar identified in this
output for information on how to contact the Registrant, Admin, or Tech
contact of the queried domain name.
# Disclaimer at the top of WHOIS results.
whoisDisclaimer: |
WHOIS information is provided by the registry solely for query-based,
informational purposes. Any information provided is "as is" without any
guarantee of accuracy. You may not use such information to (a) allow,
enable, or otherwise support the transmission of mass unsolicited,
commercial advertising or solicitations; (b) enable high volume, automated,
electronic processes that access the registry's systems or any
ICANN-Accredited Registrar, except as reasonably necessary to register
domain names or modify existing registrations; or (c) engage in or support
unlawful behavior. We reserve the right to restrict or deny your access to
the WHOIS database, and may modify these terms at any time.
# RDAP Terms of Service text displayed at the /rdap/help/tos endpoint.
rdapTos: >
By querying our Domain Database as part of the RDAP pilot program (RDAP
Domain Database), you are agreeing to comply with these terms, so please
read them carefully.
Any information provided is 'as is' without any guarantee of accuracy.
Please do not misuse the RDAP Domain Database. It is intended solely for
query-based access on an experimental basis and should not be used for or
relied upon for any other purpose.
Don't use the RDAP Domain Database to allow, enable, or otherwise support
the transmission of mass unsolicited, commercial advertising or
solicitations.
Don't access our RDAP Domain Database through the use of high volume,
automated electronic processes that send queries or data to the systems
of any ICANN-accredited registrar.
You may only use the information contained in the RDAP Domain Database for
lawful purposes.
Do not compile, repackage, disseminate, or otherwise use the information
contained in the RDAP Domain Database in its entirety, or in any
substantial portion, without our prior written permission.
We may retain certain details about queries to our RDAP Domain Database
for the purposes of detecting and preventing misuse.
We reserve the right to restrict or deny your access to the RDAP Domain
Database if we suspect that you have failed to comply with these terms.
We reserve the right to modify or discontinue our participation in the
RDAP pilot program and suspend or terminate access to the RDAP Domain
Database at any time and for any reason in our sole discretion.
We reserve the right to modify this agreement at any time.
# Link to static Web page with RDAP terms of service. Displayed in RDAP
# responses. If null, no static Web page link is generated.
rdapTosStaticUrl: null
# Name of the registry for use in spec 11 emails
registryName: Example Registry
# A list of resources we send to registrars when informing them of
# spec 11 threats
spec11WebResources: []
# Whether to require an SSL certificate hash in order to be able to log in
# via EPP and run commands. This can be false for testing environments but
# should generally be true for production environments, for added security.
requireSslCertificates: true
datastore:
# Number of commit log buckets in Datastore. Lowering this after initial
# install risks losing up to a day's worth of differential backups.
commitLogBucketsNum: 397
# Number of EPP resource index buckets in Datastore. Don't change after
# initial install.
eppResourceIndexBucketsNum: 997
# Milliseconds that Objectify waits to retry a Datastore transaction (this
# doubles after each failure).
baseOfyRetryMillis: 100
cloudDns:
# Set both properties to null in Production.
# The root url for the Cloud DNS API. Set this to a non-null value to
# override the default API server used by the googleapis library.
rootUrl: https://staging-www.sandbox.googleapis.com
# The service endpoint path for the Cloud DNS API. Set this to a non-null
# value to override the default API path used by the googleapis library.
servicePath: dns/v2beta1_staging/projects/
caching:
# Length of time that a singleton should be cached before expiring.
singletonCacheRefreshSeconds: 600
# Length of time that a reserved/premium list should be cached before expiring.
domainLabelCachingSeconds: 3600
# Length of time that a long-lived singleton in persist mode should be cached.
singletonCachePersistSeconds: 31557600 # This is one year.
# Maximum total number of static premium list entry entities to cache in
# memory, across all premium lists for all TLDs. Tuning this up will use more
# memory (and might require using larger App Engine instances). Note that
# premium list entries that are absent are cached in addition to ones that are
# present, so the total cache size is not bounded by the total number of
# premium price entries that exist.
staticPremiumListMaxCachedEntries: 200000
# Whether to enable caching of EPP resource entities and keys. Enabling this
# caching allows for much higher domain create/update throughput when hosts
# and/or contacts are being frequently used (which is commonly the case).
# However, this may introduce transactional inconsistencies, such as allowing
# hosts or contacts to be used that are actually deleted (though in practice
# this will only happen for non-widely-used entities). Only set this to true
# if you need the performance, i.e. if you need >10 domain mutations per
# frequently used contact or host. This situation is typically caused by
# registrars reusing the same contact/host across many operations, e.g. a
# privacy/proxy contact or a common host pointing to a registrar-run
# nameserver.
eppResourceCachingEnabled: false
# Length of time that EPP resource entities and keys are cached in memory
# before expiring. This should always be shorter than asyncDeleteDelaySeconds,
# to prevent deleted contacts or hosts from being used on domains.
eppResourceCachingSeconds: 60
# The maximum number of EPP resource entities and keys to cache in memory.
# LoadingCache evicts rarely-used keys first, so in practice this does not
# have to be very large to achieve the vast majority of possible gains.
eppResourceMaxCachedEntries: 500
oAuth:
# OAuth scopes to detect on access tokens. Superset of requiredOauthScopes.
availableOauthScopes:
- https://www.googleapis.com/auth/userinfo.email
# OAuth scopes required for authenticating. Subset of availableOauthScopes.
requiredOauthScopes:
- https://www.googleapis.com/auth/userinfo.email
# OAuth client IDs that are allowed to authenticate and communicate with
# backend services, e.g. nomulus tool, EPP proxy, etc. The client_id value
# used in registryTool.clientId field for associated tooling should be included
# in this list. Client IDs are typically of the format
# numbers-alphanumerics.apps.googleusercontent.com
allowedOauthClientIds: []
credentialOAuth:
# OAuth scopes required for accessing Google APIs using the default
# credential.
defaultCredentialOauthScopes:
# View and manage data in all Google Cloud APIs.
- https://www.googleapis.com/auth/cloud-platform
# View and manage files in Google Drive, e.g., Docs and Sheets.
- https://www.googleapis.com/auth/drive
# OAuth scopes required for delegated admin access to G Suite domain.
# Deployment of changes to this list must be coordinated with G Suite admin
# configuration, which can be managed in the admin console:
# - New scopes must be added to the G Suite domain configuration before the
# release is deployed.
# - Removed scopes must remain on G Suite domain configuration until the
# release is deployed.
delegatedCredentialOauthScopes:
# View and manage groups on your domain in Directory API.
- https://www.googleapis.com/auth/admin.directory.group
# View and manage group settings in Group Settings API.
- https://www.googleapis.com/auth/apps.groups.settings
# OAuth scopes required to create a credential locally for the nomulus tool.
localCredentialOauthScopes:
# View and manage data in all Google Cloud APIs.
- https://www.googleapis.com/auth/cloud-platform
# Call App Engine APIs locally.
- https://www.googleapis.com/auth/appengine.apis
# View your email address.
- https://www.googleapis.com/auth/userinfo.email
# View and manage your applications deployed on Google App Engine.
- https://www.googleapis.com/auth/appengine.admin
icannReporting:
# URL we PUT monthly ICANN transactions reports to.
icannTransactionsReportingUploadUrl: https://ry-api.icann.org/report/registrar-transactions
# URL we PUT monthly ICANN activity reports to.
icannActivityReportingUploadUrl: https://ry-api.icann.org/report/registry-functions-activity
billing:
invoiceEmailRecipients: []
invoiceFilePrefix: REG-INV
rde:
# URL prefix of ICANN's server to upload RDE reports to. Nomulus adds /TLD/ID
# to the end of this to construct the full URL.
reportUrlPrefix: https://test-ry-api.icann.org:8543/report/registry-escrow-report
# SFTP URL to which RDE deposits are uploaded. This should contain a username
# but not the password.
uploadUrl: sftp://username@rde-provider.example
# Identity of the SSH keys (stored in the Keyring) used for RDE SFTP uploads.
sshIdentityEmailAddress: rde@example.com
registrarConsole:
# Filename of the logo to use in the header of the console. This filename is
# relative to ui/assets/images/
logoFilename: logo.png
# Contact phone number for support with the registry.
supportPhoneNumber: +1 (888) 555 0123
# Contact email address for support with the registry.
supportEmailAddress: support@example.com
# From: email address used to send announcements from the registry.
announcementsEmailAddress: announcements@example.com
# Contact email address for questions about integrating with the registry.
integrationEmailAddress: integration@example.com
# URL linking to directory of technical support docs on the registry.
technicalDocsUrl: http://example.com/your_support_docs/
# Configuration for all analytics services installed in the web console
analyticsConfig:
# Google Analytics account where data on console use is sent, optional
googleAnalyticsId: null
monitoring:
# Max queries per second for the Google Cloud Monitoring V3 (aka Stackdriver)
# API. The limit can be adjusted by contacting Cloud Support.
stackdriverMaxQps: 30
# Max number of points that can be sent to Stackdriver in a single
# TimeSeries.Create API call.
stackdriverMaxPointsPerRequest: 200
# How often metrics are exported to BigQuery.
writeIntervalSeconds: 60
misc:
# The ID of the Google Sheet (as found in the URL) to export registrar details
# to. Leave this null to disable syncing.
sheetExportId: null
# Address we send alert summary emails to.
alertRecipientEmailAddress: email@example.com
# Reply-to address for the Spec 11 emails sent to registrars. This needs to
# be a deliverable email address in case the registrars want to contact us.
spec11ReplyToEmailAddress: reply-to@example.com
# How long to delay processing of asynchronous deletions. This should always
# be longer than eppResourceCachingSeconds, to prevent deleted contacts or
# hosts from being used on domains.
asyncDeleteDelaySeconds: 90
# Number of times to retry a GAE operation when a transient exception is thrown.
# The number of milliseconds it'll sleep before giving up is (2^n - 2) * 100.
transientFailureRetries: 12
beam:
# The default zone to run Apache Beam (Cloud Dataflow) jobs in.
defaultJobZone: us-east1-c
keyring:
# The name of the active keyring, either "KMS" or "Dummy".
activeKeyring: Dummy
# Configuration options specific to Google Cloud KMS.
kms:
# GCP project containing the KMS keyring. Should only be used for KMS in
# order to keep a simple locked down IAM configuration.
projectId: registry-kms-project-id
# The name to use for the Cloud KMS KeyRing which will store encryption keys
# for Nomulus secrets.
keyringName: nomulus
# Configuration options relevant to the "nomulus" registry tool.
registryTool:
# OAuth client Id used by the tool.
clientId: YOUR_CLIENT_ID
# OAuth client secret used by the tool.
clientSecret: YOUR_CLIENT_SECRET

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1,76 @@
# This is a sample production config (to be deployed in the WEB-INF directory).
# This is the same as what Google Registry runs in production, except with
# placeholders for Google-specific settings.
appEngine:
projectId: placeholder
# Set to true if running against local servers (localhost)
isLocal: false
# The "<service>-dot-" prefix is used on the project ID in this URL in order
# to get around an issue with double-wildcard SSL certs.
defaultServiceUrl: https://domain-registry-placeholder.appspot.com
backendServiceUrl: https://backend-dot-domain-registry-placeholder.appspot.com
toolsServiceUrl: https://tools-dot-domain-registry-placeholder.appspot.com
pubapiServiceUrl: https://pubapi-dot-domain-registry-placeholder.appspot.com
gSuite:
domainName: placeholder
outgoingEmailDisplayName: placeholder
outgoingEmailAddress: placeholder
adminAccountEmailAddress: placeholder
supportGroupEmailAddress: placeholder
registryPolicy:
contactAndHostRoidSuffix: placeholder
productName: placeholder
greetingServerId: placeholder
registrarChangesNotificationEmailAddresses:
- placeholder
- placeholder
defaultRegistrarWhoisServer: placeholder
tmchCaMode: PRODUCTION
tmchCrlUrl: http://crl.icann.org/tmch.crl
tmchMarksDbUrl: https://ry.marksdb.org
checkApiServletClientId: placeholder
registryAdminClientId: placeholder
whoisDisclaimer: |
multi-line
placeholder
icannReporting:
icannTransactionsReportingUploadUrl: https://ry-api.icann.org/report/registrar-transactions
icannActivityReportingUploadUrl: https://ry-api.icann.org/report/registry-functions-activity
oAuth:
allowedOauthClientIds:
- placeholder.apps.googleusercontent.com
- placeholder-for-proxy
rde:
reportUrlPrefix: https://ry-api.icann.org/report/registry-escrow-report
uploadUrl: sftp://placeholder@sftpipm2.ironmountain.com/Outbox
sshIdentityEmailAddress: placeholder
registrarConsole:
logoFilename: placeholder
supportPhoneNumber: placeholder
supportEmailAddress: placeholder
announcementsEmailAddress: placeholder
integrationEmailAddress: placeholder
technicalDocsUrl: https://drive.google.com/drive/folders/placeholder
misc:
sheetExportId: placeholder
cloudDns:
rootUrl: null
servicePath: null
keyring:
activeKeyring: KMS
kms:
projectId: placeholder
registryTool:
clientId: placeholder.apps.googleusercontent.com
clientSecret: placeholder

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1 @@
# Add environment-specific configuration here.

View file

@ -0,0 +1,33 @@
# This is the configuration file used by unit tests. These values ARE NOT
# SUITABLE for use in a real deployed environment.
registryPolicy:
registrarChangesNotificationEmailAddresses:
- notification@test.example
- notification2@test.example
defaultRegistrarWhoisServer: whois.nic.fakewhois.example
reservedTermsExportDisclaimer: |
Disclaimer line 1.
Line 2 is this 1.
datastore:
commitLogBucketsNum: 3
eppResourceIndexBucketsNum: 3
baseOfyRetryMillis: 0
caching:
singletonCacheRefreshSeconds: 0
domainLabelCachingSeconds: 0
singletonCachePersistSeconds: 0
staticPremiumListMaxCachedEntries: 50
eppResourceCachingEnabled: true
eppResourceCachingSeconds: 0
# Remove the support G Suite group, because we don't want to try connecting
# to G Suite servers from tests.
gSuite:
supportGroupEmailAddress:
misc:
  # We would rather have failures than timeouts, so reduce the number of retries.
transientFailureRetries: 3

View file

@ -0,0 +1,10 @@
licenses(["notice"]) # Apache 2.0
package(
default_visibility = ["//java/google/registry:registry_project"],
)
filegroup(
name = "all_lists",
srcs = glob(["*.txt"]),
)

View file

@ -0,0 +1,7 @@
# Example of a premium list file. This is simply a CSV file with two
# columns: sub-domain name and price (specified as currency type and value).
#
# These are manipulated using the "nomulus" tool
# {create,update,delete,list}_premium_list commands.
foo,USD 100 # comment after the item
bar,JPY 14

View file

@ -0,0 +1,10 @@
licenses(["notice"]) # Apache 2.0
package(
default_visibility = ["//java/google/registry:registry_project"],
)
filegroup(
name = "all_lists",
srcs = glob(["**/*.txt"]),
)

View file

@ -0,0 +1,12 @@
# Example of a reserved list file. This is simply a CSV file with 2-3
# columns: sub-domain name and reservation type. See
# java/google/registry/model/registry/label/ReservationType for the complete
# set of reservation types.
#
# These are manipulated using the "nomulus" tool
# {create,update,delete,list}_reserved_list commands.
sunrise,ALLOWED_IN_SUNRISE
specific,RESERVED_FOR_SPECIFIC_USE
anchor,RESERVED_FOR_ANCHOR_TENANT
collision,NAME_COLLISION
blocked,FULLY_BLOCKED # Comment after the line.

View file

@ -0,0 +1,16 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package google.registry.config;

View file

@ -0,0 +1,25 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "cron",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/model",
"//java/google/registry/request",
"//java/google/registry/request/auth",
"//java/google/registry/util",
"//third_party/objectify:objectify-v4_1",
"@com_google_appengine_api_1_0_sdk",
"@com_google_code_findbugs_jsr305",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@javax_inject",
"@javax_servlet_api",
],
)

View file

@ -0,0 +1,62 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskOptions;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.TaskQueueUtils;
import java.time.Duration;
import java.util.Optional;
import java.util.Random;
import javax.inject.Inject;
/** Action for fanning out cron tasks for each commit log bucket. */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/cron/commitLogFanout",
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class CommitLogFanoutAction implements Runnable {
public static final String BUCKET_PARAM = "bucket";
private static final Random random = new Random();
@Inject TaskQueueUtils taskQueueUtils;
@Inject @Parameter("endpoint") String endpoint;
@Inject @Parameter("queue") String queue;
@Inject @Parameter("jitterSeconds") Optional<Integer> jitterSeconds;
@Inject CommitLogFanoutAction() {}
@Override
public void run() {
Queue taskQueue = getQueue(queue);
for (int bucketId : CommitLogBucket.getBucketIds()) {
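      // Spread the fanned-out tasks over up to jitterSeconds of random delay (0 if absent).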
long delay =
jitterSeconds.map(i -> random.nextInt((int) Duration.ofSeconds(i).toMillis())).orElse(0);
TaskOptions taskOptions =
TaskOptions.Builder.withUrl(endpoint)
.param(BUCKET_PARAM, Integer.toString(bucketId))
.countdownMillis(delay);
taskQueueUtils.enqueue(taskQueue, taskOptions);
}
}
}

View file

@ -0,0 +1,82 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.cron;
import static google.registry.request.RequestParameters.extractBooleanParameter;
import static google.registry.request.RequestParameters.extractOptionalIntParameter;
import static google.registry.request.RequestParameters.extractRequiredParameter;
import static google.registry.request.RequestParameters.extractSetOfParameters;
import com.google.common.collect.ImmutableSet;
import dagger.Module;
import dagger.Provides;
import google.registry.request.Parameter;
import java.util.Optional;
import javax.servlet.http.HttpServletRequest;
/** Dagger module for the cron package. */
@Module
public final class CronModule {
public static final String ENDPOINT_PARAM = "endpoint";
public static final String QUEUE_PARAM = "queue";
public static final String FOR_EACH_REAL_TLD_PARAM = "forEachRealTld";
public static final String FOR_EACH_TEST_TLD_PARAM = "forEachTestTld";
public static final String RUN_IN_EMPTY_PARAM = "runInEmpty";
public static final String EXCLUDE_PARAM = "exclude";
public static final String JITTER_SECONDS_PARAM = "jitterSeconds";
@Provides
@Parameter(ENDPOINT_PARAM)
static String provideEndpoint(HttpServletRequest req) {
return extractRequiredParameter(req, ENDPOINT_PARAM);
}
@Provides
@Parameter(EXCLUDE_PARAM)
static ImmutableSet<String> provideExcludes(HttpServletRequest req) {
return extractSetOfParameters(req, EXCLUDE_PARAM);
}
@Provides
@Parameter(QUEUE_PARAM)
static String provideQueue(HttpServletRequest req) {
return extractRequiredParameter(req, QUEUE_PARAM);
}
@Provides
@Parameter(RUN_IN_EMPTY_PARAM)
static boolean provideRunInEmpty(HttpServletRequest req) {
return extractBooleanParameter(req, RUN_IN_EMPTY_PARAM);
}
@Provides
@Parameter(FOR_EACH_REAL_TLD_PARAM)
static boolean provideForEachRealTld(HttpServletRequest req) {
return extractBooleanParameter(req, FOR_EACH_REAL_TLD_PARAM);
}
@Provides
@Parameter(FOR_EACH_TEST_TLD_PARAM)
static boolean provideForEachTestTld(HttpServletRequest req) {
return extractBooleanParameter(req, FOR_EACH_TEST_TLD_PARAM);
}
@Provides
@Parameter(JITTER_SECONDS_PARAM)
static Optional<Integer> provideJitterSeconds(HttpServletRequest req) {
return extractOptionalIntParameter(req, JITTER_SECONDS_PARAM);
}
}

View file

@ -0,0 +1,178 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.cron;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.appengine.api.taskqueue.TaskOptions.Builder.withUrl;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;
import static com.google.common.base.Strings.nullToEmpty;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Iterables.getFirst;
import static com.google.common.collect.Multimaps.filterKeys;
import static com.google.common.net.MediaType.PLAIN_TEXT_UTF_8;
import static google.registry.cron.CronModule.ENDPOINT_PARAM;
import static google.registry.cron.CronModule.EXCLUDE_PARAM;
import static google.registry.cron.CronModule.FOR_EACH_REAL_TLD_PARAM;
import static google.registry.cron.CronModule.FOR_EACH_TEST_TLD_PARAM;
import static google.registry.cron.CronModule.JITTER_SECONDS_PARAM;
import static google.registry.cron.CronModule.QUEUE_PARAM;
import static google.registry.cron.CronModule.RUN_IN_EMPTY_PARAM;
import static google.registry.model.registry.Registries.getTldsOfType;
import static google.registry.model.registry.Registry.TldType.REAL;
import static google.registry.model.registry.Registry.TldType.TEST;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.ParameterMap;
import google.registry.request.RequestParameters;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.TaskQueueUtils;
import java.util.Optional;
import java.util.Random;
import java.util.stream.Stream;
import javax.inject.Inject;
/**
* Action for fanning out cron tasks shared by TLD.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code endpoint} (Required) URL path of servlet to launch. This may contain pathargs.
* <li>{@code queue} (Required) Name of the App Engine push queue to which this task should be
* sent.
* <li>{@code forEachRealTld} Launch the task in each real TLD namespace.
* <li>{@code forEachTestTld} Launch the task in each test TLD namespace.
* <li>{@code runInEmpty} Launch the task once, without the TLD argument.
* <li>{@code exclude} TLDs to exclude.
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* <li>Any other parameters specified will be passed through as POST parameters to the called
* task.
* </ul>
*
* <h3>Patharg Reference</h3>
*
* <p>The following values may be specified inside the "endpoint" param.
*
* <ul>
* <li>{@code :tld} Substituted with an ASCII tld, if tld fanout is enabled. This patharg is
* mostly useful for aesthetic purposes, since tasks are already namespaced.
* </ul>
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/cron/fanout",
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class TldFanoutAction implements Runnable {
/** A set of control params to TldFanoutAction that aren't passed down to the executing action. */
private static final ImmutableSet<String> CONTROL_PARAMS =
ImmutableSet.of(
ENDPOINT_PARAM,
QUEUE_PARAM,
FOR_EACH_REAL_TLD_PARAM,
FOR_EACH_TEST_TLD_PARAM,
RUN_IN_EMPTY_PARAM,
EXCLUDE_PARAM,
JITTER_SECONDS_PARAM);
private static final Random random = new Random();
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject TaskQueueUtils taskQueueUtils;
@Inject Response response;
@Inject @Parameter(ENDPOINT_PARAM) String endpoint;
@Inject @Parameter(QUEUE_PARAM) String queue;
@Inject @Parameter(FOR_EACH_REAL_TLD_PARAM) boolean forEachRealTld;
@Inject @Parameter(FOR_EACH_TEST_TLD_PARAM) boolean forEachTestTld;
@Inject @Parameter(RUN_IN_EMPTY_PARAM) boolean runInEmpty;
@Inject @Parameter(EXCLUDE_PARAM) ImmutableSet<String> excludes;
@Inject @Parameter(JITTER_SECONDS_PARAM) Optional<Integer> jitterSeconds;
@Inject @ParameterMap ImmutableListMultimap<String, String> params;
@Inject TldFanoutAction() {}
@Override
public void run() {
checkArgument(
!(runInEmpty && (forEachTestTld || forEachRealTld)),
"runInEmpty and forEach*Tld are mutually exclusive");
checkArgument(
runInEmpty || forEachTestTld || forEachRealTld,
"At least one of runInEmpty, forEachTestTld, forEachRealTld must be given");
checkArgument(
!(runInEmpty && !excludes.isEmpty()),
"Can't specify 'exclude' with 'runInEmpty'");
ImmutableSet<String> tlds =
runInEmpty
? ImmutableSet.of("")
: Streams.concat(
forEachRealTld ? getTldsOfType(REAL).stream() : Stream.of(),
forEachTestTld ? getTldsOfType(TEST).stream() : Stream.of())
.filter(not(in(excludes)))
.collect(toImmutableSet());
Multimap<String, String> flowThruParams = filterKeys(params, not(in(CONTROL_PARAMS)));
Queue taskQueue = getQueue(queue);
StringBuilder outputPayload =
new StringBuilder(
String.format("OK: Launched the following %d tasks in queue %s\n", tlds.size(), queue));
logger.atInfo().log("Launching %d tasks in queue %s", tlds.size(), queue);
if (tlds.isEmpty()) {
logger.atWarning().log("No TLDs to fan-out!");
}
for (String tld : tlds) {
TaskOptions taskOptions = createTaskOptions(tld, flowThruParams);
TaskHandle taskHandle = taskQueueUtils.enqueue(taskQueue, taskOptions);
outputPayload.append(
String.format(
"- Task: '%s', tld: '%s', endpoint: '%s'\n",
taskHandle.getName(), tld, taskOptions.getUrl()));
logger.atInfo().log(
"Task: '%s', tld: '%s', endpoint: '%s'", taskHandle.getName(), tld, taskOptions.getUrl());
}
response.setContentType(PLAIN_TEXT_UTF_8);
response.setPayload(outputPayload.toString());
}
private TaskOptions createTaskOptions(String tld, Multimap<String, String> params) {
TaskOptions options =
withUrl(endpoint)
.countdownMillis(
jitterSeconds
.map(seconds -> random.nextInt((int) SECONDS.toMillis(seconds)))
.orElse(0));
if (!tld.isEmpty()) {
options.param(RequestParameters.PARAM_TLD, tld);
}
for (String param : params.keySet()) {
// TaskOptions.param() does not accept null values.
options.param(param, nullToEmpty(getFirst(params.get(param), null)));
}
return options;
}
}
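
For illustration only (the queue and endpoint values below are invented, not taken from this commit's cron configuration), a fanout request might look like /_dr/cron/fanout?queue=sheet&endpoint=/_dr/task/syncRegistrarsSheet&runInEmpty, which would enqueue a single task for the empty namespace onto the "sheet" queue.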

View file

@ -0,0 +1,16 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package google.registry.cron;

View file

@ -0,0 +1,42 @@
# Routines to publish authoritative DNS.
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "constants",
srcs = ["DnsConstants.java"],
)
java_library(
name = "dns",
srcs = glob(
["*.java"],
exclude = ["DnsConstants.java"],
),
deps = [
":constants",
"//java/google/registry/config",
"//java/google/registry/dns/writer",
"//java/google/registry/model",
"//java/google/registry/request",
"//java/google/registry/request/auth",
"//java/google/registry/request/lock",
"//java/google/registry/util",
"//third_party/objectify:objectify-v4_1",
"@com_google_appengine_api_1_0_sdk",
"@com_google_auto_value",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@com_google_monitoring_client_metrics",
"@com_google_protobuf_java",
"@javax_inject",
"@javax_servlet_api",
"@joda_time",
],
)

View file

@ -0,0 +1,38 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
/** Static class for DNS-related constants. */
public class DnsConstants {
private DnsConstants() {}
/** The name of the DNS pull queue. */
public static final String DNS_PULL_QUEUE_NAME = "dns-pull"; // See queue.xml.
/** The name of the DNS publish push queue. */
public static final String DNS_PUBLISH_PUSH_QUEUE_NAME = "dns-publish"; // See queue.xml.
/** The parameter to use for storing the target type ("domain" or "host" or "zone"). */
public static final String DNS_TARGET_TYPE_PARAM = "Target-Type";
/** The parameter to use for storing the target name (domain or host name) with the task. */
public static final String DNS_TARGET_NAME_PARAM = "Target-Name";
/** The parameter to use for storing the creation time with the task. */
public static final String DNS_TARGET_CREATE_TIME_PARAM = "Create-Time";
/** The possible values of the {@code DNS_TARGET_TYPE_PARAM} parameter. */
public enum TargetType { DOMAIN, HOST, ZONE }
}
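
A hedged sketch of how these constants might be used when enqueuing a refresh task on the pull queue (the helper class, domain name, and timestamp handling are illustrative):

import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;

import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import google.registry.dns.DnsConstants.TargetType;
import org.joda.time.DateTime;

class EnqueueSketch {
  // Pull-queue tasks carry their payload as params; a queue reader extracts them later.
  static void enqueueDomainRefresh(String domainName, DateTime now) {
    TaskOptions task =
        TaskOptions.Builder.withMethod(Method.PULL)
            .param(DnsConstants.DNS_TARGET_TYPE_PARAM, TargetType.DOMAIN.toString())
            .param(DnsConstants.DNS_TARGET_NAME_PARAM, domainName)
            .param(DnsConstants.DNS_TARGET_CREATE_TIME_PARAM, now.toString());
    getQueue(DnsConstants.DNS_PULL_QUEUE_NAME).add(task);
  }
}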

View file

@ -0,0 +1,265 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import com.google.common.collect.ImmutableSet;
import com.google.monitoring.metrics.DistributionFitter;
import com.google.monitoring.metrics.EventMetric;
import com.google.monitoring.metrics.ExponentialFitter;
import com.google.monitoring.metrics.FibonacciFitter;
import com.google.monitoring.metrics.IncrementableMetric;
import com.google.monitoring.metrics.LabelDescriptor;
import com.google.monitoring.metrics.MetricRegistryImpl;
import google.registry.config.RegistryEnvironment;
import javax.inject.Inject;
import org.joda.time.Duration;
/** DNS instrumentation. */
// TODO(b/67947699): Once load testing is done, revisit these to rename them and delete the ones that
// we don't expect to need long-term.
public class DnsMetrics {
/** Disposition of a publish request. */
public enum PublishStatus { ACCEPTED, REJECTED }
/** Disposition of writer.commit(). */
public enum CommitStatus { SUCCESS, FAILURE }
/** Disposition of the publish action. */
public enum ActionStatus { SUCCESS, COMMIT_FAILURE, LOCK_FAILURE, BAD_WRITER, BAD_LOCK_INDEX }
private static final ImmutableSet<LabelDescriptor> LABEL_DESCRIPTORS_FOR_PUBLISH_REQUESTS =
ImmutableSet.of(
LabelDescriptor.create("tld", "TLD"),
LabelDescriptor.create(
"status", "Whether the publish request was accepted or rejected."));
private static final ImmutableSet<LabelDescriptor> LABEL_DESCRIPTORS_FOR_COMMIT =
ImmutableSet.of(
LabelDescriptor.create("tld", "TLD"),
LabelDescriptor.create("status", "Whether writer.commit() succeeded or failed."),
LabelDescriptor.create("dnsWriter", "The DnsWriter used."));
private static final ImmutableSet<LabelDescriptor> LABEL_DESCRIPTORS_FOR_LATENCY =
ImmutableSet.of(
LabelDescriptor.create("tld", "TLD"),
LabelDescriptor.create("status", "Whether the publish succeeded, or why it failed."),
LabelDescriptor.create("dnsWriter", "The DnsWriter used."));
// Finer-grained fitter than the DEFAULT_FITTER, allows values between 100 ms and just over 29
// hours.
private static final DistributionFitter EXPONENTIAL_FITTER =
ExponentialFitter.create(20, 2.0, 100.0);
// Fibonacci fitter more suitable for integer-type values. Allows values between 0 and 10946,
// which is the 21st Fibonacci number.
private static final DistributionFitter FIBONACCI_FITTER =
FibonacciFitter.create(10946);
private static final IncrementableMetric publishDomainRequests =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/dns/publish_domain_requests",
"count of publishDomain requests",
"count",
LABEL_DESCRIPTORS_FOR_PUBLISH_REQUESTS);
private static final IncrementableMetric publishHostRequests =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/dns/publish_host_requests",
"count of publishHost requests",
"count",
LABEL_DESCRIPTORS_FOR_PUBLISH_REQUESTS);
private static final IncrementableMetric commitCount =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/dns/commit_requests",
"Count of writer.commit() calls",
"count",
LABEL_DESCRIPTORS_FOR_COMMIT);
private static final IncrementableMetric domainsCommittedCount =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/dns/domains_committed",
"Count of domains committed",
"count",
LABEL_DESCRIPTORS_FOR_COMMIT);
private static final IncrementableMetric hostsCommittedCount =
MetricRegistryImpl.getDefault()
.newIncrementableMetric(
"/dns/hosts_committed",
"Count of hosts committed",
"count",
LABEL_DESCRIPTORS_FOR_COMMIT);
private static final EventMetric processingTimePerCommitDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_batch/processing_time",
"publishDnsUpdates Processing Time",
"milliseconds",
LABEL_DESCRIPTORS_FOR_COMMIT,
EXPONENTIAL_FITTER);
private static final EventMetric normalizedProcessingTimePerCommitDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_batch/processing_time_per_dns_update",
"publishDnsUpdates Processing Time, divided by the batch size",
"milliseconds",
LABEL_DESCRIPTORS_FOR_COMMIT,
EXPONENTIAL_FITTER);
private static final EventMetric totalBatchSizePerCommitDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_batch/batch_size",
"Number of hosts and domains committed in each publishDnsUpdates",
"count",
LABEL_DESCRIPTORS_FOR_COMMIT,
FIBONACCI_FITTER);
private static final EventMetric processingTimePerItemDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_item/processing_time",
"publishDnsUpdates Processing Time",
"milliseconds",
LABEL_DESCRIPTORS_FOR_COMMIT,
EXPONENTIAL_FITTER);
private static final EventMetric normalizedProcessingTimePerItemDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_item/processing_time_per_dns_update",
"publishDnsUpdates Processing Time, divided by the batch size",
"milliseconds",
LABEL_DESCRIPTORS_FOR_COMMIT,
EXPONENTIAL_FITTER);
private static final EventMetric totalBatchSizePerItemDist =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/per_item/batch_size",
"Batch sizes for hosts and domains",
"count",
LABEL_DESCRIPTORS_FOR_COMMIT,
FIBONACCI_FITTER);
private static final EventMetric updateRequestLatency =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/update_latency",
"Time elapsed since refresh request was created until it was published",
"milliseconds",
LABEL_DESCRIPTORS_FOR_LATENCY,
EXPONENTIAL_FITTER);
private static final EventMetric publishQueueDelay =
MetricRegistryImpl.getDefault()
.newEventMetric(
"/dns/publish_queue_delay",
"Time elapsed since the publishDnsUpdates action was created until it was executed",
"milliseconds",
LABEL_DESCRIPTORS_FOR_LATENCY,
EXPONENTIAL_FITTER);
@Inject RegistryEnvironment registryEnvironment;
@Inject
DnsMetrics() {}
/**
* Increment a monotonic counter that tracks calls to {@link
* google.registry.dns.writer.DnsWriter#publishDomain(String)}, per TLD.
*/
public void incrementPublishDomainRequests(String tld, long numRequests, PublishStatus status) {
if (numRequests > 0) {
publishDomainRequests.incrementBy(numRequests, tld, status.name());
}
}
/**
* Increment a monotonic counter that tracks calls to {@link
* google.registry.dns.writer.DnsWriter#publishHost(String)}, per TLD.
*/
public void incrementPublishHostRequests(String tld, long numRequests, PublishStatus status) {
if (numRequests > 0) {
publishHostRequests.incrementBy(numRequests, tld, status.name());
}
}
/**
* Measures information about the entire batched commit, per TLD.
*
* <p>The information includes running times (per item and per commit), and batch sizes (per item
 * and per commit).
*
* <p>This is to be used for load testing the system, and will not measure anything in prod.
*/
void recordCommit(
String tld,
String dnsWriter,
CommitStatus status,
Duration processingDuration,
int numberOfDomains,
int numberOfHosts) {
commitCount.increment(tld, status.name(), dnsWriter);
domainsCommittedCount.incrementBy(numberOfDomains, tld, status.name(), dnsWriter);
hostsCommittedCount.incrementBy(numberOfHosts, tld, status.name(), dnsWriter);
// We don't want to record the following metrics in production, as they are quite expensive
if (registryEnvironment == RegistryEnvironment.PRODUCTION) {
return;
}
int batchSize = numberOfDomains + numberOfHosts;
processingTimePerCommitDist.record(
processingDuration.getMillis(), tld, status.name(), dnsWriter);
processingTimePerItemDist.record(
processingDuration.getMillis(), batchSize, tld, status.name(), dnsWriter);
if (batchSize > 0) {
normalizedProcessingTimePerCommitDist.record(
(double) processingDuration.getMillis() / batchSize,
tld, status.name(), dnsWriter);
normalizedProcessingTimePerItemDist.record(
(double) processingDuration.getMillis() / batchSize,
batchSize,
tld, status.name(), dnsWriter);
}
totalBatchSizePerCommitDist.record(batchSize, tld, status.name(), dnsWriter);
totalBatchSizePerItemDist.record(batchSize, batchSize, tld, status.name(), dnsWriter);
}
void recordActionResult(
String tld,
String dnsWriter,
ActionStatus status,
int numberOfItems,
Duration timeSinceUpdateRequest,
Duration timeSinceActionEnqueued) {
updateRequestLatency.record(
timeSinceUpdateRequest.getMillis(), numberOfItems, tld, status.name(), dnsWriter);
publishQueueDelay.record(timeSinceActionEnqueued.getMillis(), tld, status.name(), dnsWriter);
}
}
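
A hedged example of a call site (the values are invented for illustration; the real publish action computes them from the batch it just committed):

// Within the google.registry.dns package, after a writer.commit() succeeds:
dnsMetrics.recordCommit(
    "example",                            // tld
    "CloudDnsWriter",                     // dnsWriter label (hypothetical)
    DnsMetrics.CommitStatus.SUCCESS,
    org.joda.time.Duration.millis(1250),  // processing time for the batch
    40,                                   // domains committed
    2);                                   // hosts committed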

View file

@ -0,0 +1,132 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static google.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static google.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static google.registry.request.RequestParameters.extractEnumParameter;
import static google.registry.request.RequestParameters.extractIntParameter;
import static google.registry.request.RequestParameters.extractRequiredParameter;
import static google.registry.request.RequestParameters.extractSetOfParameters;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import dagger.Binds;
import dagger.Module;
import dagger.Provides;
import google.registry.dns.DnsConstants.TargetType;
import google.registry.dns.writer.DnsWriterZone;
import google.registry.request.Parameter;
import google.registry.request.RequestParameters;
import java.util.Set;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
import org.joda.time.DateTime;
/** Dagger module for the dns package. */
@Module
public abstract class DnsModule {
public static final String PARAM_DNS_WRITER = "dnsWriter";
public static final String PARAM_LOCK_INDEX = "lockIndex";
public static final String PARAM_NUM_PUBLISH_LOCKS = "numPublishLocks";
public static final String PARAM_DOMAINS = "domains";
public static final String PARAM_HOSTS = "hosts";
public static final String PARAM_PUBLISH_TASK_ENQUEUED = "enqueued";
public static final String PARAM_REFRESH_REQUEST_CREATED = "itemsCreated";
@Binds
@DnsWriterZone
abstract String provideZoneName(@Parameter(RequestParameters.PARAM_TLD) String tld);
/**
* Provides a HashFunction used for generating sharded DNS publish queue tasks.
*
* <p>We use murmur3_32 because it isn't subject to change, and is fast (non-cryptographic, which
* would be overkill in this situation.)
*/
@Provides
static HashFunction provideHashFunction() {
return Hashing.murmur3_32();
}
@Provides
@Named(DNS_PULL_QUEUE_NAME)
static Queue provideDnsPullQueue() {
return QueueFactory.getQueue(DNS_PULL_QUEUE_NAME);
}
@Provides
@Named(DNS_PUBLISH_PUSH_QUEUE_NAME)
static Queue provideDnsUpdatePushQueue() {
return QueueFactory.getQueue(DNS_PUBLISH_PUSH_QUEUE_NAME);
}
@Provides
@Parameter(PARAM_PUBLISH_TASK_ENQUEUED)
static DateTime provideCreateTime(HttpServletRequest req) {
return DateTime.parse(extractRequiredParameter(req, PARAM_PUBLISH_TASK_ENQUEUED));
}
@Provides
@Parameter(PARAM_REFRESH_REQUEST_CREATED)
static DateTime provideItemsCreateTime(HttpServletRequest req) {
return DateTime.parse(extractRequiredParameter(req, PARAM_REFRESH_REQUEST_CREATED));
}
@Provides
@Parameter(PARAM_DNS_WRITER)
static String provideDnsWriter(HttpServletRequest req) {
return extractRequiredParameter(req, PARAM_DNS_WRITER);
}
@Provides
@Parameter(PARAM_LOCK_INDEX)
static int provideLockIndex(HttpServletRequest req) {
return extractIntParameter(req, PARAM_LOCK_INDEX);
}
@Provides
@Parameter(PARAM_NUM_PUBLISH_LOCKS)
static int provideMaxNumLocks(HttpServletRequest req) {
return extractIntParameter(req, PARAM_NUM_PUBLISH_LOCKS);
}
@Provides
@Parameter(PARAM_DOMAINS)
static Set<String> provideDomains(HttpServletRequest req) {
return extractSetOfParameters(req, PARAM_DOMAINS);
}
@Provides
@Parameter(PARAM_HOSTS)
static Set<String> provideHosts(HttpServletRequest req) {
return extractSetOfParameters(req, PARAM_HOSTS);
}
@Provides
@Parameter("domainOrHostName")
static String provideName(HttpServletRequest req) {
return extractRequiredParameter(req, "name");
}
@Provides
@Parameter("type")
static TargetType provideType(HttpServletRequest req) {
return extractEnumParameter(req, TargetType.class, "type");
}
}
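
A minimal sketch (not part of this commit; the domain name is invented) of the stability property the Javadoc above relies on: murmur3_32 is seedless and fully specified, so a given name always hashes to the same value across JVMs and releases, which keeps shard assignments consistent.

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

class Murmur3StabilitySketch {
  public static void main(String[] args) {
    // Prints the same 32-bit value on every run and on every machine.
    HashCode code = Hashing.murmur3_32().hashString("example.tld", UTF_8);
    System.out.println(code.asInt());
  }
}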


@ -0,0 +1,186 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static com.google.appengine.api.taskqueue.QueueFactory.getQueue;
import static com.google.common.base.Preconditions.checkArgument;
import static google.registry.dns.DnsConstants.DNS_PULL_QUEUE_NAME;
import static google.registry.dns.DnsConstants.DNS_TARGET_CREATE_TIME_PARAM;
import static google.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static google.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static google.registry.model.registry.Registries.assertTldExists;
import static google.registry.request.RequestParameters.PARAM_TLD;
import static google.registry.util.DomainNameUtils.getTldFromDomainName;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueConstants;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.apphosting.api.DeadlineExceededException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.FluentLogger;
import com.google.common.net.InternetDomainName;
import com.google.common.util.concurrent.RateLimiter;
import google.registry.dns.DnsConstants.TargetType;
import google.registry.model.registry.Registries;
import google.registry.util.Clock;
import google.registry.util.NonFinalForTesting;
import google.registry.util.SystemClock;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.Duration;
/**
* Methods for manipulating the queue used for DNS write tasks.
*
* <p>This includes a {@link RateLimiter} to limit the {@link Queue#leaseTasks} call rate to 9 QPS,
* to stay under the 10 QPS limit for this function.
*
* <p>Note that overlapping calls to {@link ReadDnsQueueAction} (the only place where
* {@link DnsQueue#leaseTasks} is used) will have different rate limiters, so they could exceed the
* allowed rate. This should be rare, though, because {@link DnsQueue#leaseTasks} is only used in
* {@link ReadDnsQueueAction}, which runs as a cron job whose running time is shorter than the
* cron repeat time, meaning there should never be two instances running at once.
*
* @see google.registry.config.RegistryConfig.ConfigModule#provideReadDnsQueueRuntime
*/
public class DnsQueue {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Queue queue;
final Clock clock;
// Queue.leaseTasks is limited to 10 requests per second as per
// https://cloud.google.com/appengine/docs/standard/java/javadoc/com/google/appengine/api/taskqueue/Queue.html
// "If you generate more than 10 LeaseTasks requests per second, only the first 10 requests will
// return results. The others will return no results."
private static final RateLimiter rateLimiter = RateLimiter.create(9);
@Inject
public DnsQueue(@Named(DNS_PULL_QUEUE_NAME) Queue queue, Clock clock) {
this.queue = queue;
this.clock = clock;
}
/**
* Constructs a new instance.
*
* <p><b>Note:</b> Prefer <code>@Inject</code>ing DnsQueue instances instead. You should only use
* this helper method in situations for which injection does not work, e.g. inside mapper or
* reducer classes in mapreduces that need to be Serializable.
*/
public static DnsQueue create() {
return new DnsQueue(getQueue(DNS_PULL_QUEUE_NAME), new SystemClock());
}
@VisibleForTesting
public static DnsQueue createForTesting(Clock clock) {
return new DnsQueue(getQueue(DNS_PULL_QUEUE_NAME), clock);
}
@NonFinalForTesting
@VisibleForTesting
long leaseTasksBatchSize = QueueConstants.maxLeaseCount();
/** Enqueues the given task type with the given target name to the DNS queue. */
private TaskHandle addToQueue(
TargetType targetType, String targetName, String tld, Duration countdown) {
logger.atInfo().log(
"Adding task type=%s, target=%s, tld=%s to pull queue %s (%d tasks currently on queue)",
targetType, targetName, tld, DNS_PULL_QUEUE_NAME, queue.fetchStatistics().getNumTasks());
return queue.add(
TaskOptions.Builder.withDefaults()
.method(Method.PULL)
.countdownMillis(countdown.getMillis())
.param(DNS_TARGET_TYPE_PARAM, targetType.toString())
.param(DNS_TARGET_NAME_PARAM, targetName)
.param(DNS_TARGET_CREATE_TIME_PARAM, clock.nowUtc().toString())
.param(PARAM_TLD, tld));
}
/**
* Adds a task to the queue to refresh the DNS information for the specified subordinate host.
*/
public TaskHandle addHostRefreshTask(String fullyQualifiedHostName) {
Optional<InternetDomainName> tld =
Registries.findTldForName(InternetDomainName.from(fullyQualifiedHostName));
checkArgument(tld.isPresent(),
String.format("%s is not a subordinate host to a known tld", fullyQualifiedHostName));
return addToQueue(TargetType.HOST, fullyQualifiedHostName, tld.get().toString(), Duration.ZERO);
}
/** Enqueues a task to refresh DNS for the specified domain now. */
public TaskHandle addDomainRefreshTask(String fullyQualifiedDomainName) {
return addDomainRefreshTask(fullyQualifiedDomainName, Duration.ZERO);
}
/** Enqueues a task to refresh DNS for the specified domain at some point in the future. */
public TaskHandle addDomainRefreshTask(String fullyQualifiedDomainName, Duration countdown) {
return addToQueue(
TargetType.DOMAIN,
fullyQualifiedDomainName,
assertTldExists(getTldFromDomainName(fullyQualifiedDomainName)),
countdown);
}
/** Adds a task to the queue to refresh the DNS information for the specified zone. */
public TaskHandle addZoneRefreshTask(String fullyQualifiedZoneName) {
return addToQueue(
TargetType.ZONE, fullyQualifiedZoneName, fullyQualifiedZoneName, Duration.ZERO);
}
/**
* Returns the maximum number of tasks that can be leased with {@link #leaseTasks}.
*
* <p>If this many tasks are returned, then there might be more tasks still waiting in the queue.
*
* <p>If fewer than this number of tasks are returned, then there are no more items in the queue.
*/
public long getLeaseTasksBatchSize() {
return leaseTasksBatchSize;
}
/** Returns handles for a batch of tasks, leased for the specified duration. */
public List<TaskHandle> leaseTasks(Duration leaseDuration) {
try {
rateLimiter.acquire();
int numTasks = queue.fetchStatistics().getNumTasks();
logger.at((numTasks >= leaseTasksBatchSize) ? Level.WARNING : Level.INFO).log(
"There are %d tasks in the DNS queue '%s'.", numTasks, DNS_PULL_QUEUE_NAME);
return queue.leaseTasks(leaseDuration.getMillis(), MILLISECONDS, leaseTasksBatchSize);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.atSevere().withCause(e).log("Failed leasing tasks too fast");
return ImmutableList.of();
}
}
/** Deletes a list of tasks, removing them from the queue permanently. */
public void deleteTasks(List<TaskHandle> tasks) {
try {
queue.deleteTask(tasks);
} catch (TransientFailureException | DeadlineExceededException e) {
logger.atSevere().withCause(e).log("Failed deleting tasks too fast");
}
}
}
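
Taken together, the helpers above make enqueueing straightforward. A hypothetical usage sketch (domain and host names invented), assuming an App Engine context where the pull queue exists:

import google.registry.dns.DnsQueue;
import org.joda.time.Duration;

class EnqueueSketch {
  static void refreshExamples() {
    // Per the Javadoc above, prefer @Inject; create() is for non-injectable contexts.
    DnsQueue dnsQueue = DnsQueue.create();
    dnsQueue.addDomainRefreshTask("example.tld");                            // refresh now
    dnsQueue.addDomainRefreshTask("later.tld", Duration.standardMinutes(5)); // delayed refresh
    dnsQueue.addHostRefreshTask("ns1.example.tld");                          // subordinate host
  }
}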


@ -0,0 +1,53 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.collect.ImmutableMap;
import com.google.common.flogger.FluentLogger;
import google.registry.dns.writer.DnsWriter;
import google.registry.model.registry.Registry;
import java.util.Map;
import javax.inject.Inject;
/** Proxy for retrieving {@link DnsWriter} implementations. */
public final class DnsWriterProxy {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final ImmutableMap<String, DnsWriter> dnsWriters;
@Inject
DnsWriterProxy(Map<String, DnsWriter> dnsWriters) {
this.dnsWriters = ImmutableMap.copyOf(dnsWriters);
}
/**
* Returns the instance of {@link DnsWriter} by class name.
*
* <p>Returns null if the named DnsWriter is not active on the given TLD.
*/
public DnsWriter getByClassNameForTld(String className, String tld) {
if (!Registry.get(tld).getDnsWriters().contains(className)) {
logger.atWarning().log(
"Loaded potentially stale DNS writer %s which is not active on TLD %s.", className, tld);
return null;
}
DnsWriter dnsWriter = dnsWriters.get(className);
checkState(dnsWriter != null, "Could not load DnsWriter %s for TLD %s", className, tld);
return dnsWriter;
}
}
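
The injected Map&lt;String, DnsWriter&gt; is assembled by Dagger map multibindings: each writer module contributes one entry keyed by the writer's name. A minimal sketch of such a contribution (the writer name and no-op body are invented; the VoidDnsWriterModule later in this commit follows the same shape):

import dagger.Module;
import dagger.Provides;
import dagger.multibindings.IntoMap;
import dagger.multibindings.StringKey;
import google.registry.dns.writer.DnsWriter;

@Module
class ExampleDnsWriterModule {
  @Provides
  @IntoMap
  @StringKey("ExampleDnsWriter") // hypothetical writer name
  static DnsWriter provideWriter() {
    // A no-op stand-in; a real module would provide a concrete DnsWriter.
    return new DnsWriter() {
      @Override public void publishDomain(String domainName) {}
      @Override public void publishHost(String hostName) {}
      @Override public void commit() {}
    };
  }
}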


@ -0,0 +1,247 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static google.registry.dns.DnsModule.PARAM_DNS_WRITER;
import static google.registry.dns.DnsModule.PARAM_DOMAINS;
import static google.registry.dns.DnsModule.PARAM_HOSTS;
import static google.registry.dns.DnsModule.PARAM_LOCK_INDEX;
import static google.registry.dns.DnsModule.PARAM_NUM_PUBLISH_LOCKS;
import static google.registry.dns.DnsModule.PARAM_PUBLISH_TASK_ENQUEUED;
import static google.registry.dns.DnsModule.PARAM_REFRESH_REQUEST_CREATED;
import static google.registry.request.Action.Method.POST;
import static google.registry.request.RequestParameters.PARAM_TLD;
import static google.registry.util.CollectionUtils.nullToEmpty;
import com.google.common.flogger.FluentLogger;
import com.google.common.net.InternetDomainName;
import google.registry.config.RegistryConfig.Config;
import google.registry.dns.DnsMetrics.ActionStatus;
import google.registry.dns.DnsMetrics.CommitStatus;
import google.registry.dns.DnsMetrics.PublishStatus;
import google.registry.dns.writer.DnsWriter;
import google.registry.model.registry.Registry;
import google.registry.request.Action;
import google.registry.request.HttpException.ServiceUnavailableException;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.request.lock.LockHandler;
import google.registry.util.Clock;
import google.registry.util.DomainNameUtils;
import java.util.Set;
import java.util.concurrent.Callable;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/** Task that sends domain and host updates to the DNS server. */
@Action(
service = Action.Service.BACKEND,
path = PublishDnsUpdatesAction.PATH,
method = POST,
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
public static final String PATH = "/_dr/task/publishDnsUpdates";
public static final String LOCK_NAME = "DNS updates";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject DnsQueue dnsQueue;
@Inject DnsWriterProxy dnsWriterProxy;
@Inject DnsMetrics dnsMetrics;
@Inject @Config("publishDnsUpdatesLockDuration") Duration timeout;
/**
* The DNS writer to use for this batch.
*
* <p>This comes from the fanout in {@link ReadDnsQueueAction} which dispatches each batch to be
* published by each DNS writer on the TLD. So this field contains the value of one of the DNS
* writers configured in {@link Registry#getDnsWriters()}, as of the time the batch was written
* out (and not necessarily currently).
*/
@Inject @Parameter(PARAM_DNS_WRITER) String dnsWriter;
@Inject
@Parameter(PARAM_PUBLISH_TASK_ENQUEUED)
DateTime enqueuedTime;
@Inject
@Parameter(PARAM_REFRESH_REQUEST_CREATED)
DateTime itemsCreateTime;
@Inject @Parameter(PARAM_LOCK_INDEX) int lockIndex;
@Inject @Parameter(PARAM_NUM_PUBLISH_LOCKS) int numPublishLocks;
@Inject @Parameter(PARAM_DOMAINS) Set<String> domains;
@Inject @Parameter(PARAM_HOSTS) Set<String> hosts;
@Inject @Parameter(PARAM_TLD) String tld;
@Inject LockHandler lockHandler;
@Inject Clock clock;
@Inject PublishDnsUpdatesAction() {}
private void recordActionResult(ActionStatus status) {
DateTime now = clock.nowUtc();
dnsMetrics.recordActionResult(
tld,
dnsWriter,
status,
nullToEmpty(domains).size() + nullToEmpty(hosts).size(),
new Duration(itemsCreateTime, now),
new Duration(enqueuedTime, now));
logger.atInfo().log(
"publishDnsWriter latency statistics: TLD: %s, dnsWriter: %s, actionStatus: %s, "
+ "numItems: %d, timeSinceCreation: %s, timeInQueue: %s",
tld,
dnsWriter,
status,
nullToEmpty(domains).size() + nullToEmpty(hosts).size(),
new Duration(itemsCreateTime, now),
new Duration(enqueuedTime, now));
}
/** Runs the task. */
@Override
public void run() {
if (!validLockParams()) {
recordActionResult(ActionStatus.BAD_LOCK_INDEX);
requeueBatch();
return;
}
// If executeWithLocks fails to get the lock, it does not throw an exception, simply returns
// false. We need to make sure to take note of this error; otherwise, a failed lock might result
// in the update task being dequeued and dropped. A message will already have been logged
// to indicate the problem.
if (!lockHandler.executeWithLocks(
this,
tld,
timeout,
String.format("%s-lock %d of %d", LOCK_NAME, lockIndex, numPublishLocks))) {
recordActionResult(ActionStatus.LOCK_FAILURE);
throw new ServiceUnavailableException("Lock failure");
}
}
/** Runs the task, with the lock. */
@Override
public Void call() {
processBatch();
return null;
}
/** Adds all the domains and hosts in the batch back to the queue to be processed later. */
private void requeueBatch() {
logger.atInfo().log("Requeueing batch for retry");
for (String domain : nullToEmpty(domains)) {
dnsQueue.addDomainRefreshTask(domain);
}
for (String host : nullToEmpty(hosts)) {
dnsQueue.addHostRefreshTask(host);
}
}
/** Returns whether the lock parameters are valid for this action. */
private boolean validLockParams() {
// LockIndex should always be within [1, numPublishLocks]
if (lockIndex > numPublishLocks || lockIndex <= 0) {
logger.atSevere().log(
"Lock index should be within [1,%d], got %d instead", numPublishLocks, lockIndex);
return false;
}
// Check if the Registry object's num locks has changed since this task was batched
int registryNumPublishLocks = Registry.get(tld).getNumDnsPublishLocks();
if (registryNumPublishLocks != numPublishLocks) {
logger.atWarning().log(
"Registry numDnsPublishLocks %d out of sync with parameter %d",
registryNumPublishLocks, numPublishLocks);
return false;
}
return true;
}
/** Steps through the domain and host refreshes contained in the parameters and processes them. */
private void processBatch() {
DateTime timeAtStart = clock.nowUtc();
DnsWriter writer = dnsWriterProxy.getByClassNameForTld(dnsWriter, tld);
if (writer == null) {
logger.atWarning().log("Couldn't get writer %s for TLD %s", dnsWriter, tld);
recordActionResult(ActionStatus.BAD_WRITER);
requeueBatch();
return;
}
int domainsPublished = 0;
int domainsRejected = 0;
for (String domain : nullToEmpty(domains)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(domain), InternetDomainName.from(tld))) {
logger.atSevere().log("%s: skipping domain %s not under tld", tld, domain);
domainsRejected += 1;
} else {
writer.publishDomain(domain);
logger.atInfo().log("%s: published domain %s", tld, domain);
domainsPublished += 1;
}
}
dnsMetrics.incrementPublishDomainRequests(tld, domainsPublished, PublishStatus.ACCEPTED);
dnsMetrics.incrementPublishDomainRequests(tld, domainsRejected, PublishStatus.REJECTED);
int hostsPublished = 0;
int hostsRejected = 0;
for (String host : nullToEmpty(hosts)) {
if (!DomainNameUtils.isUnder(
InternetDomainName.from(host), InternetDomainName.from(tld))) {
logger.atSevere().log("%s: skipping host %s not under tld", tld, host);
hostsRejected += 1;
} else {
writer.publishHost(host);
logger.atInfo().log("%s: published host %s", tld, host);
hostsPublished += 1;
}
}
dnsMetrics.incrementPublishHostRequests(tld, hostsPublished, PublishStatus.ACCEPTED);
dnsMetrics.incrementPublishHostRequests(tld, hostsRejected, PublishStatus.REJECTED);
// If we got here it means we managed to stage the entire batch without any errors.
// Next we will commit the batch.
CommitStatus commitStatus = CommitStatus.FAILURE;
ActionStatus actionStatus = ActionStatus.COMMIT_FAILURE;
try {
writer.commit();
// No error was thrown
commitStatus = CommitStatus.SUCCESS;
actionStatus = ActionStatus.SUCCESS;
} finally {
recordActionResult(actionStatus);
Duration duration = new Duration(timeAtStart, clock.nowUtc());
dnsMetrics.recordCommit(
tld, dnsWriter, commitStatus, duration, domainsPublished, hostsPublished);
logger.atInfo().log(
"writer.commit() statistics: TLD: %s, dnsWriter: %s, commitStatus: %s, duration: %s, "
+ "domainsPublished: %d, domainsRejected: %d, hostsPublished: %d, hostsRejected: %d",
tld,
dnsWriter,
commitStatus,
duration,
domainsPublished,
domainsRejected,
hostsPublished,
hostsRejected);
}
}
}
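
The run()/call() split above is the essence of the locking pattern: run() acquires a named lock and executes call() under it, and a failed acquisition surfaces as an error so the task queue retries. A reduced sketch of the same shape, using an invented stand-in for the LockHandler API:

import java.util.concurrent.Callable;

class LockedTaskSketch implements Runnable, Callable<Void> {
  /** Hypothetical stand-in for the real LockHandler interface. */
  interface Locker {
    boolean executeWithLock(Callable<Void> task, String lockName);
  }

  private final Locker locker;

  LockedTaskSketch(Locker locker) {
    this.locker = locker;
  }

  @Override
  public void run() {
    if (!locker.executeWithLock(this, "DNS updates-lock 1 of 1")) {
      // Throw so the task is retried instead of being silently dropped.
      throw new IllegalStateException("Lock failure");
    }
  }

  @Override
  public Void call() {
    // Process the batch while holding the lock.
    return null;
  }
}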


@ -0,0 +1,393 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static com.google.common.collect.ImmutableSetMultimap.toImmutableSetMultimap;
import static com.google.common.collect.Sets.difference;
import static google.registry.dns.DnsConstants.DNS_PUBLISH_PUSH_QUEUE_NAME;
import static google.registry.dns.DnsConstants.DNS_TARGET_CREATE_TIME_PARAM;
import static google.registry.dns.DnsConstants.DNS_TARGET_NAME_PARAM;
import static google.registry.dns.DnsConstants.DNS_TARGET_TYPE_PARAM;
import static google.registry.dns.DnsModule.PARAM_DNS_WRITER;
import static google.registry.dns.DnsModule.PARAM_DOMAINS;
import static google.registry.dns.DnsModule.PARAM_HOSTS;
import static google.registry.dns.DnsModule.PARAM_LOCK_INDEX;
import static google.registry.dns.DnsModule.PARAM_NUM_PUBLISH_LOCKS;
import static google.registry.dns.DnsModule.PARAM_PUBLISH_TASK_ENQUEUED;
import static google.registry.dns.DnsModule.PARAM_REFRESH_REQUEST_CREATED;
import static google.registry.request.RequestParameters.PARAM_TLD;
import static google.registry.util.DomainNameUtils.getSecondLevelDomain;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.TaskHandle;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Ordering;
import com.google.common.flogger.FluentLogger;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import google.registry.config.RegistryConfig.Config;
import google.registry.dns.DnsConstants.TargetType;
import google.registry.model.registry.Registries;
import google.registry.model.registry.Registry;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import google.registry.util.TaskQueueUtils;
import java.io.UnsupportedEncodingException;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Action for fanning out DNS refresh tasks by TLD, using data taken from the DNS pull queue.
*
* <h3>Parameters Reference</h3>
*
* <ul>
* <li>{@code jitterSeconds} Randomly delay each task by up to this many seconds.
* </ul>
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/cron/readDnsQueue",
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class ReadDnsQueueAction implements Runnable {
private static final String PARAM_JITTER_SECONDS = "jitterSeconds";
private static final Random random = new Random();
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
/**
* Buffer time between the end of this action and when retriable tasks become available again.
*
* <p>We read batches of tasks from the queue in a loop. Any task that will need to be retried has
* to be kept out of the queue for the duration of this action - otherwise we will lease it again
* in a subsequent loop.
*
* <p>The 'requestedMaximumDuration' value is the maximum delay between the first and last calls
* to lease tasks, hence we want the lease duration to be (slightly) longer than that.
* LEASE_PADDING is the value we add to {@link #requestedMaximumDuration} to make sure the lease
* duration is indeed longer.
*/
private static final Duration LEASE_PADDING = Duration.standardMinutes(1);
@Inject @Config("dnsTldUpdateBatchSize") int tldUpdateBatchSize;
@Inject @Config("readDnsQueueActionRuntime") Duration requestedMaximumDuration;
@Inject @Named(DNS_PUBLISH_PUSH_QUEUE_NAME) Queue dnsPublishPushQueue;
@Inject @Parameter(PARAM_JITTER_SECONDS) Optional<Integer> jitterSeconds;
@Inject Clock clock;
@Inject DnsQueue dnsQueue;
@Inject HashFunction hashFunction;
@Inject TaskQueueUtils taskQueueUtils;
@Inject ReadDnsQueueAction() {}
/** Container for items we pull out of the DNS pull queue and process for fanout. */
@AutoValue
abstract static class RefreshItem implements Comparable<RefreshItem> {
static RefreshItem create(TargetType type, String name, DateTime creationTime) {
return new AutoValue_ReadDnsQueueAction_RefreshItem(type, name, creationTime);
}
abstract TargetType type();
abstract String name();
abstract DateTime creationTime();
@Override
public int compareTo(RefreshItem other) {
return ComparisonChain.start()
.compare(this.type(), other.type())
.compare(this.name(), other.name())
.compare(this.creationTime(), other.creationTime())
.result();
}
}
/** Leases all tasks from the pull queue and creates per-tld update actions for them. */
@Override
public void run() {
DateTime requestedEndTime = clock.nowUtc().plus(requestedMaximumDuration);
ImmutableSet<String> tlds = Registries.getTlds();
while (requestedEndTime.isAfterNow()) {
List<TaskHandle> tasks = dnsQueue.leaseTasks(requestedMaximumDuration.plus(LEASE_PADDING));
logger.atInfo().log("Leased %d DNS update tasks.", tasks.size());
if (!tasks.isEmpty()) {
dispatchTasks(ImmutableSet.copyOf(tasks), tlds);
}
if (tasks.size() < dnsQueue.getLeaseTasksBatchSize()) {
return;
}
}
}
/** A set of tasks grouped based on the action to take on them. */
@AutoValue
abstract static class ClassifiedTasks {
/**
* List of tasks we want to keep in the queue (want to retry in the future).
*
* <p>Normally, any task we lease from the queue will be deleted - either because we are going
* to process it now (these tasks are part of refreshItemsByTld), or because these tasks are
* "corrupt" in some way (don't parse, don't have the required parameters etc.).
*
* <p>Some tasks however are valid, but can't be processed at the moment. These tasks will be
* kept in the queue for future processing.
*
* <p>This includes tasks belonging to paused TLDs (which we want to process once the TLD is
* unpaused) and tasks belonging to (currently) unknown TLDs.
*/
abstract ImmutableSet<TaskHandle> tasksToKeep();
/** The paused TLDs for which we found at least one refresh request. */
abstract ImmutableSet<String> pausedTlds();
/**
* The unknown TLDs for which we found at least one refresh request.
*
* <p>Unknown TLDs might have valid requests because the list of TLDs is heavily cached. Hence,
* when we add a new TLD - it is possible that some instances will not have that TLD in their
* list yet. We don't want to discard these tasks, so we wait a bit and retry them again.
*
* <p>This is less likely for production TLDs but is quite likely for test TLDs where we might
* create a TLD and then use it within seconds.
*/
abstract ImmutableSet<String> unknownTlds();
/**
* All the refresh items we need to actually fulfill, grouped by TLD.
*
* <p>By default, the multimap is ordered - this can be changed with the builder.
*
* <p>The items for each TLD will be grouped together, and domains and hosts will be grouped
* within a TLD.
*
* <p>The grouping and ordering of domains and hosts is not technically necessary, but a
* predictable ordering makes it possible to write detailed tests.
*/
abstract ImmutableSetMultimap<String, RefreshItem> refreshItemsByTld();
static Builder builder() {
Builder builder = new AutoValue_ReadDnsQueueAction_ClassifiedTasks.Builder();
builder
.refreshItemsByTldBuilder()
.orderKeysBy(Ordering.natural())
.orderValuesBy(Ordering.natural());
return builder;
}
@AutoValue.Builder
abstract static class Builder {
abstract ImmutableSet.Builder<TaskHandle> tasksToKeepBuilder();
abstract ImmutableSet.Builder<String> pausedTldsBuilder();
abstract ImmutableSet.Builder<String> unknownTldsBuilder();
abstract ImmutableSetMultimap.Builder<String, RefreshItem> refreshItemsByTldBuilder();
abstract ClassifiedTasks build();
}
}
/**
* Creates per-tld update actions for tasks leased from the pull queue.
*
* <p>Will return "irrelevant" tasks to the queue for future processing. "Irrelevant" tasks are
* tasks for paused TLDs or tasks for TLDs not part of {@link Registries#getTlds()}.
*/
private void dispatchTasks(ImmutableSet<TaskHandle> tasks, ImmutableSet<String> tlds) {
ClassifiedTasks classifiedTasks = classifyTasks(tasks, tlds);
if (!classifiedTasks.pausedTlds().isEmpty()) {
logger.atInfo().log(
"The dns-pull queue is paused for TLDs: %s.", classifiedTasks.pausedTlds());
}
if (!classifiedTasks.unknownTlds().isEmpty()) {
logger.atWarning().log(
"The dns-pull queue has unknown TLDs: %s.", classifiedTasks.unknownTlds());
}
bucketRefreshItems(classifiedTasks.refreshItemsByTld());
if (!classifiedTasks.tasksToKeep().isEmpty()) {
logger.atWarning().log(
"Keeping %d DNS update tasks in the queue.", classifiedTasks.tasksToKeep().size());
}
// Delete the tasks we don't want to see again from the queue.
//
// tasksToDelete includes both the tasks that we already fulfilled in this call (were part of
// refreshItemsByTld) and "corrupt" tasks we weren't able to parse correctly (this shouldn't
// happen, and we logged a "severe" error)
//
// We let the lease on the rest of the tasks (the tasks we want to keep) expire on its own. The
// reason we don't release these tasks back to the queue immediately is that we are going to
// immediately read another batch of tasks from the queue - and we don't want to get the same
// tasks again.
ImmutableSet<TaskHandle> tasksToDelete =
difference(tasks, classifiedTasks.tasksToKeep()).immutableCopy();
logger.atInfo().log("Removing %d DNS update tasks from the queue.", tasksToDelete.size());
dnsQueue.deleteTasks(tasksToDelete.asList());
logger.atInfo().log("Done processing DNS tasks.");
}
/**
* Classifies the given tasks based on what action we need to take on them.
*
* <p>Note that some tasks might appear in multiple categories (if multiple actions are to be
* taken on them) or in no category (if no action is to be taken on them).
*/
private static ClassifiedTasks classifyTasks(
ImmutableSet<TaskHandle> tasks, ImmutableSet<String> tlds) {
ClassifiedTasks.Builder classifiedTasksBuilder = ClassifiedTasks.builder();
// Read all tasks on the DNS pull queue and load them into the refresh item multimap.
for (TaskHandle task : tasks) {
try {
Map<String, String> params = ImmutableMap.copyOf(task.extractParams());
DateTime creationTime = DateTime.parse(params.get(DNS_TARGET_CREATE_TIME_PARAM));
String tld = params.get(PARAM_TLD);
if (tld == null) {
logger.atSevere().log(
"Discarding invalid DNS refresh request %s; no TLD specified.", task);
} else if (!tlds.contains(tld)) {
classifiedTasksBuilder.tasksToKeepBuilder().add(task);
classifiedTasksBuilder.unknownTldsBuilder().add(tld);
} else if (Registry.get(tld).getDnsPaused()) {
classifiedTasksBuilder.tasksToKeepBuilder().add(task);
classifiedTasksBuilder.pausedTldsBuilder().add(tld);
} else {
String typeString = params.get(DNS_TARGET_TYPE_PARAM);
String name = params.get(DNS_TARGET_NAME_PARAM);
TargetType type = TargetType.valueOf(typeString);
switch (type) {
case DOMAIN:
case HOST:
classifiedTasksBuilder
.refreshItemsByTldBuilder()
.put(tld, RefreshItem.create(type, name, creationTime));
break;
default:
logger.atSevere().log(
"Discarding DNS refresh request %s of type %s.", task, typeString);
break;
}
}
} catch (RuntimeException | UnsupportedEncodingException e) {
logger.atSevere().withCause(e).log("Discarding invalid DNS refresh request %s.", task);
}
}
return classifiedTasksBuilder.build();
}
/**
* Subdivides the tld to {@link RefreshItem} multimap into buckets by lock index, if applicable.
*
* <p>If the tld has numDnsPublishLocks <= 1, we enqueue all updates on the default lock 1 of 1.
*/
private void bucketRefreshItems(ImmutableSetMultimap<String, RefreshItem> refreshItemsByTld) {
// Loop through the multimap by TLD and generate refresh tasks for the hosts and domains for
// each configured DNS writer.
for (Map.Entry<String, Collection<RefreshItem>> tldRefreshItemsEntry
: refreshItemsByTld.asMap().entrySet()) {
String tld = tldRefreshItemsEntry.getKey();
int numPublishLocks = Registry.get(tld).getNumDnsPublishLocks();
// 1 lock or less implies no TLD-wide locks, simply enqueue everything under lock 1 of 1
if (numPublishLocks <= 1) {
enqueueUpdates(tld, 1, 1, tldRefreshItemsEntry.getValue());
} else {
tldRefreshItemsEntry
.getValue()
.stream()
.collect(
toImmutableSetMultimap(
refreshItem -> getLockIndex(tld, numPublishLocks, refreshItem),
refreshItem -> refreshItem))
.asMap()
.entrySet()
.forEach(
entry -> enqueueUpdates(tld, entry.getKey(), numPublishLocks, entry.getValue()));
}
}
}
/**
* Returns the lock index for a given refreshItem.
*
* <p>We hash the second-level domain for all records, to group in-bailiwick hosts (the
* only ones we refresh DNS for) with their superordinate domains. We use consistent hashing to
* determine the lock index because it gives us [0,N) bucketing properties out of the box, then
* add 1 to make indexes within [1,N].
*/
private int getLockIndex(String tld, int numPublishLocks, RefreshItem refreshItem) {
String domain = getSecondLevelDomain(refreshItem.name(), tld);
return Hashing.consistentHash(hashFunction.hashString(domain, UTF_8), numPublishLocks) + 1;
}
/**
* Creates DNS refresh tasks for all writers for the tld within a lock index and batches large
* updates into smaller chunks.
*/
private void enqueueUpdates(
String tld, int lockIndex, int numPublishLocks, Collection<RefreshItem> items) {
for (List<RefreshItem> chunk : Iterables.partition(items, tldUpdateBatchSize)) {
DateTime earliestCreateTime =
chunk.stream().map(RefreshItem::creationTime).min(Comparator.naturalOrder()).get();
for (String dnsWriter : Registry.get(tld).getDnsWriters()) {
taskQueueUtils.enqueue(
dnsPublishPushQueue,
TaskOptions.Builder.withUrl(PublishDnsUpdatesAction.PATH)
.countdownMillis(
jitterSeconds
.map(seconds -> random.nextInt((int) SECONDS.toMillis(seconds)))
.orElse(0))
.param(PARAM_TLD, tld)
.param(PARAM_DNS_WRITER, dnsWriter)
.param(PARAM_LOCK_INDEX, Integer.toString(lockIndex))
.param(PARAM_NUM_PUBLISH_LOCKS, Integer.toString(numPublishLocks))
.param(PARAM_PUBLISH_TASK_ENQUEUED, clock.nowUtc().toString())
.param(PARAM_REFRESH_REQUEST_CREATED, earliestCreateTime.toString())
.param(
PARAM_DOMAINS,
chunk
.stream()
.filter(item -> item.type() == TargetType.DOMAIN)
.map(RefreshItem::name)
.collect(Collectors.joining(",")))
.param(
PARAM_HOSTS,
chunk
.stream()
.filter(item -> item.type() == TargetType.HOST)
.map(RefreshItem::name)
.collect(Collectors.joining(","))));
}
}
}
}
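
A minimal sketch (with an invented domain and lock count) of the getLockIndex() arithmetic above: Hashing.consistentHash buckets the hash into [0, N), and the +1 shifts it into [1, N], so a given second-level domain always lands on the same publish lock.

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.common.hash.Hashing;

class LockIndexSketch {
  public static void main(String[] args) {
    int numPublishLocks = 4; // hypothetical TLD setting
    int lockIndex =
        Hashing.consistentHash(
                Hashing.murmur3_32().hashString("example.tld", UTF_8), numPublishLocks)
            + 1;
    // Prints a stable value in [1, 4] for this domain.
    System.out.println("lock " + lockIndex + " of " + numPublishLocks);
  }
}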


@ -0,0 +1,95 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
import google.registry.dns.DnsConstants.TargetType;
import google.registry.model.EppResource;
import google.registry.model.EppResource.ForeignKeyedEppResource;
import google.registry.model.annotations.ExternalMessagingName;
import google.registry.model.domain.DomainBase;
import google.registry.model.host.HostResource;
import google.registry.request.Action;
import google.registry.request.HttpException.BadRequestException;
import google.registry.request.HttpException.NotFoundException;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import javax.inject.Inject;
/** Action that manually triggers refresh of DNS information. */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/dnsRefresh",
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_ONLY)
public final class RefreshDnsAction implements Runnable {
private final Clock clock;
private final DnsQueue dnsQueue;
private final String domainOrHostName;
private final TargetType type;
@Inject
RefreshDnsAction(
@Parameter("domainOrHostName") String domainOrHostName,
@Parameter("type") TargetType type,
Clock clock,
DnsQueue dnsQueue) {
this.domainOrHostName = domainOrHostName;
this.type = type;
this.clock = clock;
this.dnsQueue = dnsQueue;
}
@Override
public void run() {
if (!domainOrHostName.contains(".")) {
throw new BadRequestException("URL parameter 'name' must be fully qualified");
}
switch (type) {
case DOMAIN:
loadAndVerifyExistence(DomainBase.class, domainOrHostName);
dnsQueue.addDomainRefreshTask(domainOrHostName);
break;
case HOST:
verifyHostIsSubordinate(loadAndVerifyExistence(HostResource.class, domainOrHostName));
dnsQueue.addHostRefreshTask(domainOrHostName);
break;
default:
throw new BadRequestException("Unsupported type: " + type);
}
}
private <T extends EppResource & ForeignKeyedEppResource>
T loadAndVerifyExistence(Class<T> clazz, String foreignKey) {
return loadByForeignKey(clazz, foreignKey, clock.nowUtc())
.orElseThrow(
() ->
new NotFoundException(
String.format(
"%s %s not found",
clazz.getAnnotation(ExternalMessagingName.class).value(),
domainOrHostName)));
}
private static void verifyHostIsSubordinate(HostResource host) {
if (!host.isSubordinate()) {
throw new BadRequestException(
String.format("%s isn't a subordinate hostname", host.getFullyQualifiedHostName()));
}
}
}
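
The action above reads 'type' and 'name' from the request (bound in DnsModule). A hypothetical sketch of assembling such a request path; the host, authentication, and dispatch mechanics are deployment-specific and elided:

import static java.nio.charset.StandardCharsets.UTF_8;

import java.net.URLEncoder;

class RefreshDnsRequestSketch {
  public static void main(String[] args) throws Exception {
    // Builds the relative URL a backend caller would hit to refresh one domain.
    String path =
        "/_dr/dnsRefresh?type=DOMAIN&name=" + URLEncoder.encode("example.tld", UTF_8.name());
    System.out.println(path);
  }
}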


@ -0,0 +1,17 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "writer",
srcs = glob(["*.java"]),
deps = [
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@javax_inject",
],
)


@ -0,0 +1,36 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer;
import static com.google.common.base.Preconditions.checkState;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A base implementation of {@link DnsWriter} that protects against multiple calls to commit().
*/
public abstract class BaseDnsWriter implements DnsWriter {
private final AtomicBoolean committedAlready = new AtomicBoolean(false);
@Override
public final void commit() {
checkState(committedAlready.compareAndSet(false, true), "commit() has already been called");
commitUnchecked();
}
/** Commits DNS updates. This can never be called more than once. */
protected abstract void commitUnchecked();
}
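
A minimal sketch demonstrating the guarantee above: the AtomicBoolean's compareAndSet lets exactly one commit() through, so a second call fails fast even with racing callers. The subclass body here is an invented no-op:

import google.registry.dns.writer.BaseDnsWriter;

class CommitOnceSketch extends BaseDnsWriter {
  @Override public void publishDomain(String domainName) {}
  @Override public void publishHost(String hostName) {}
  @Override protected void commitUnchecked() {}

  public static void main(String[] args) {
    CommitOnceSketch writer = new CommitOnceSketch();
    writer.commit(); // succeeds
    writer.commit(); // throws IllegalStateException: commit() has already been called
  }
}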


@ -0,0 +1,78 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer;
/**
* Transaction object for sending an atomic batch of updates for a single zone to the DNS server.
*
* <p>All updates are tentative until commit is called. If commit isn't called, no change will
* happen.
*
* <p>Here's an example of how you would publish updates for a domain and host:
* <pre>
* &#064;Inject Provider&lt;DnsWriter&gt; dnsWriter;
* DnsWriter writer = dnsWriter.get();
* writer.publishDomain(domainName);
* writer.publishHost(hostName);
* writer.commit();
* </pre>
*/
public interface DnsWriter {
/**
* Loads {@code domainName} from Datastore and publishes its NS/DS records to the DNS server.
* Replaces existing records for the exact name supplied with an NS record for each name server
* and a DS record for each delegation signer stored in the registry for the supplied domain name.
* If the domain is deleted or is in a "non-publish" state then any existing records are deleted.
*
* <p>This must NOT actually perform any action; instead, it should stage the action so that it's
* performed when {@link #commit()} is called.
*
* @param domainName the fully qualified domain name, with no trailing dot
*/
void publishDomain(String domainName);
/**
* Loads {@code hostName} from Datastore and publishes its A/AAAA glue records to the DNS server,
* if it is used as an in-bailiwick nameserver. Orphaned glue records are prohibited. Replaces
* existing records for the exact name supplied, with an A or AAAA record (as appropriate) for
* each address stored in the registry, for the supplied host name. If the host is deleted then
* the existing records are deleted. Assumes that this method will only be called for in-bailiwick
* hosts. The registry does not have addresses for other hosts.
*
* <p>This must NOT actually perform any action; instead, it should stage the action so that it's
* performed when {@link #commit()} is called.
*
* @param hostName the fully qualified host name, with no trailing dot
*/
void publishHost(String hostName);
/**
* Commits the updates to the DNS server atomically.
*
* <p>The user is responsible for making sure commit() isn't called twice. Implementations are
* encouraged to throw an error if commit() is called twice.
*
* <p>Here's an example of how you would do that:
* <pre>
* private boolean committed = false;
* void commit() {
* checkState(!committed, "commit() has already been called");
* committed = true;
* // ... actual commit implementation
* }
* </pre>
*/
void commit();
}


@ -0,0 +1,23 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer;
import java.lang.annotation.Documented;
import javax.inject.Qualifier;
/** Dagger qualifier for the fully-qualified zone name that's being updated. */
@Qualifier
@Documented
public @interface DnsWriterZone {}


@ -0,0 +1,57 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer;
import com.google.common.flogger.FluentLogger;
import java.util.HashSet;
import java.util.Set;
import javax.inject.Inject;
/**
* {@link DnsWriter} that doesn't actually update records in a DNS server.
*
* <p>All this class does is write its displeasure to the logs.
*/
public final class VoidDnsWriter extends BaseDnsWriter {
/**
* The name of the DNS writer, as used in {@code Registry.dnsWriter}. Remember to change
* the value on affected Registry objects to prevent runtime failures.
*/
public static final String NAME = "VoidDnsWriter";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Set<String> names = new HashSet<>();
@Inject
public VoidDnsWriter() {}
@Override
public void publishDomain(String domainName) {
names.add(domainName);
}
@Override
public void publishHost(String hostName) {
names.add(hostName);
}
@Override
protected void commitUnchecked() {
logger.atWarning().log(
"No DnsWriterFactory implementation specified; ignoring names to commit: %s", names);
}
}


@ -0,0 +1,41 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer;
import dagger.Module;
import dagger.Provides;
import dagger.multibindings.IntoMap;
import dagger.multibindings.IntoSet;
import dagger.multibindings.StringKey;
import javax.inject.Named;
/** Dagger module that disables DNS updates. */
@Module
public final class VoidDnsWriterModule {
@Provides
@IntoMap
@StringKey(VoidDnsWriter.NAME)
static DnsWriter provideWriter(VoidDnsWriter writer) {
return writer;
}
@Provides
@IntoSet
@Named("dnsWriterNames")
static String provideWriterName() {
return VoidDnsWriter.NAME;
}
}
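
The @Named("dnsWriterNames") contribution above is a Dagger set multibinding: every writer module adds its name, and Dagger merges them into one injectable Set&lt;String&gt;. A hedged sketch of a consumer (the class and its validation use are invented):

import java.util.Set;
import javax.inject.Inject;
import javax.inject.Named;

class DnsWriterNamesConsumerSketch {
  private final Set<String> dnsWriterNames;

  @Inject
  DnsWriterNamesConsumerSketch(@Named("dnsWriterNames") Set<String> dnsWriterNames) {
    this.dnsWriterNames = dnsWriterNames;
  }

  /** Returns whether a configured writer name matches a known implementation. */
  boolean isKnownWriter(String name) {
    return dnsWriterNames.contains(name);
  }
}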


@ -0,0 +1,25 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "clouddns",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/config",
"//java/google/registry/dns/writer",
"//java/google/registry/model",
"//java/google/registry/util",
"@com_google_api_client",
"@com_google_apis_google_api_services_dns",
"@com_google_dagger",
"@com_google_flogger",
"@com_google_flogger_system_backend",
"@com_google_guava",
"@com_google_http_client",
"@javax_inject",
"@joda_time",
],
)


@ -0,0 +1,424 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.clouddns;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
import static google.registry.util.DomainNameUtils.getSecondLevelDomain;
import com.google.api.client.googleapis.json.GoogleJsonError;
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
import com.google.api.services.dns.Dns;
import com.google.api.services.dns.model.Change;
import com.google.api.services.dns.model.ResourceRecordSet;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.flogger.FluentLogger;
import com.google.common.net.InternetDomainName;
import com.google.common.util.concurrent.RateLimiter;
import google.registry.config.RegistryConfig.Config;
import google.registry.dns.writer.BaseDnsWriter;
import google.registry.dns.writer.DnsWriter;
import google.registry.dns.writer.DnsWriterZone;
import google.registry.model.domain.DomainBase;
import google.registry.model.domain.secdns.DelegationSignerData;
import google.registry.model.host.HostResource;
import google.registry.model.registry.Registries;
import google.registry.util.Clock;
import google.registry.util.Concurrent;
import google.registry.util.Retrier;
import java.io.IOException;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;
import javax.inject.Inject;
import javax.inject.Named;
import org.joda.time.Duration;
/**
* {@link DnsWriter} implementation that talks to Google Cloud DNS.
*
* @see <a href="https://cloud.google.com/dns/docs/">Google Cloud DNS Documentation</a>
*/
public class CloudDnsWriter extends BaseDnsWriter {
/**
* The name of the dns writer, as used in {@code Registry.dnsWriter}. Remember to change the value
* on affected Registry objects to prevent runtime failures.
*/
public static final String NAME = "CloudDnsWriter";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final ImmutableSet<String> RETRYABLE_EXCEPTION_REASONS =
ImmutableSet.of("preconditionFailed", "notFound", "alreadyExists");
private final Clock clock;
private final RateLimiter rateLimiter;
private final int numThreads;
// TODO(shikhman): This uses @Named("transientFailureRetries") which may not be tuned for this
// application.
private final Retrier retrier;
private final Duration defaultATtl;
private final Duration defaultNsTtl;
private final Duration defaultDsTtl;
private final String projectId;
private final String zoneName;
private final Dns dnsConnection;
private final HashMap<String, ImmutableSet<ResourceRecordSet>> desiredRecords = new HashMap<>();
@Inject
CloudDnsWriter(
Dns dnsConnection,
@Config("projectId") String projectId,
@DnsWriterZone String zoneName,
@Config("dnsDefaultATtl") Duration defaultATtl,
@Config("dnsDefaultNsTtl") Duration defaultNsTtl,
@Config("dnsDefaultDsTtl") Duration defaultDsTtl,
@Named("cloudDns") RateLimiter rateLimiter,
@Named("cloudDnsNumThreads") int numThreads,
Clock clock,
Retrier retrier) {
this.dnsConnection = dnsConnection;
this.projectId = projectId;
this.zoneName = zoneName.replace('.', '-');
this.defaultATtl = defaultATtl;
this.defaultNsTtl = defaultNsTtl;
this.defaultDsTtl = defaultDsTtl;
this.rateLimiter = rateLimiter;
this.clock = clock;
this.retrier = retrier;
this.numThreads = numThreads;
}
/** Publish the domain and all subordinate hosts. */
@Override
public void publishDomain(String domainName) {
// Canonicalize name
String absoluteDomainName = getAbsoluteHostName(domainName);
// Load the target domain. Note that it can be absent if this domain was just deleted.
Optional<DomainBase> domainBase =
loadByForeignKey(DomainBase.class, domainName, clock.nowUtc());
// Return early if no DNS records should be published.
// desiredRecordsBuilder is populated with an empty set to indicate that all existing records
// should be deleted.
if (!domainBase.isPresent() || !domainBase.get().shouldPublishToDns()) {
desiredRecords.put(absoluteDomainName, ImmutableSet.of());
return;
}
ImmutableSet.Builder<ResourceRecordSet> domainRecords = new ImmutableSet.Builder<>();
// Construct DS records (if any).
Set<DelegationSignerData> dsData = domainBase.get().getDsData();
if (!dsData.isEmpty()) {
HashSet<String> dsRrData = new HashSet<>();
for (DelegationSignerData ds : dsData) {
dsRrData.add(ds.toRrData());
}
if (!dsRrData.isEmpty()) {
domainRecords.add(
new ResourceRecordSet()
.setName(absoluteDomainName)
.setTtl((int) defaultDsTtl.getStandardSeconds())
.setType("DS")
.setKind("dns#resourceRecordSet")
.setRrdatas(ImmutableList.copyOf(dsRrData)));
}
}
// Construct NS records (if any).
Set<String> nameserverData = domainBase.get().loadNameserverFullyQualifiedHostNames();
Set<String> subordinateHosts = domainBase.get().getSubordinateHosts();
if (!nameserverData.isEmpty()) {
HashSet<String> nsRrData = new HashSet<>();
for (String hostName : nameserverData) {
nsRrData.add(getAbsoluteHostName(hostName));
// Construct glue records for subordinate NS hostnames (if any)
if (subordinateHosts.contains(hostName)) {
publishSubordinateHost(hostName);
}
}
if (!nsRrData.isEmpty()) {
domainRecords.add(
new ResourceRecordSet()
.setName(absoluteDomainName)
.setTtl((int) defaultNsTtl.getStandardSeconds())
.setType("NS")
.setKind("dns#resourceRecordSet")
.setRrdatas(ImmutableList.copyOf(nsRrData)));
}
}
desiredRecords.put(absoluteDomainName, domainRecords.build());
logger.atFine().log(
"Will write %d records for domain %s", domainRecords.build().size(), absoluteDomainName);
}
private void publishSubordinateHost(String hostName) {
logger.atInfo().log("Publishing glue records for %s", hostName);
// Canonicalize name
String absoluteHostName = getAbsoluteHostName(hostName);
// Load the target host. Note that it can be absent if this host was just deleted.
// desiredRecords is populated with an empty set to indicate that all existing records
// should be deleted.
Optional<HostResource> host = loadByForeignKey(HostResource.class, hostName, clock.nowUtc());
// Return early if the host is deleted.
if (!host.isPresent()) {
desiredRecords.put(absoluteHostName, ImmutableSet.of());
return;
}
ImmutableSet.Builder<ResourceRecordSet> domainRecords = new ImmutableSet.Builder<>();
// Construct A and AAAA records (if any).
HashSet<String> aRrData = new HashSet<>();
HashSet<String> aaaaRrData = new HashSet<>();
for (InetAddress ip : host.get().getInetAddresses()) {
if (ip instanceof Inet4Address) {
aRrData.add(ip.getHostAddress());
} else {
checkArgument(ip instanceof Inet6Address);
aaaaRrData.add(ip.getHostAddress());
}
}
if (!aRrData.isEmpty()) {
domainRecords.add(
new ResourceRecordSet()
.setName(absoluteHostName)
.setTtl((int) defaultATtl.getStandardSeconds())
.setType("A")
.setKind("dns#resourceRecordSet")
.setRrdatas(ImmutableList.copyOf(aRrData)));
}
if (!aaaaRrData.isEmpty()) {
domainRecords.add(
new ResourceRecordSet()
.setName(absoluteHostName)
.setTtl((int) defaultATtl.getStandardSeconds())
.setType("AAAA")
.setKind("dns#resourceRecordSet")
.setRrdatas(ImmutableList.copyOf(aaaaRrData)));
}
desiredRecords.put(absoluteHostName, domainRecords.build());
}
/**
* Publish A/AAAA records to Cloud DNS.
*
* <p>Cloud DNS has no API for glue -- A/AAAA records are automatically matched to their
* corresponding NS records to serve glue.
*/
@Override
public void publishHost(String hostName) {
// Get the superordinate domain name of the host.
InternetDomainName host = InternetDomainName.from(hostName);
Optional<InternetDomainName> tld = Registries.findTldForName(host);
// Host not managed by our registry, no need to update DNS.
if (!tld.isPresent()) {
logger.atSevere().log("publishHost called for invalid host %s", hostName);
return;
}
// Refresh the superordinate domain, since we shouldn't be publishing glue records if we are not
// authoritative for the superordinate domain.
publishDomain(getSecondLevelDomain(hostName, tld.get().toString()));
}
/**
* Sync changes in a zone requested by publishDomain and publishHost to Cloud DNS.
*
* <p>The zone for the TLD must exist first in Cloud DNS and must be DNSSEC enabled.
*
* <p>The relevant resource records (including those of all subordinate hosts) will be retrieved
* and the operation will be retried until the state of the retrieved zone data matches the
* representation built via this writer.
*/
@Override
protected void commitUnchecked() {
ImmutableMap<String, ImmutableSet<ResourceRecordSet>> desiredRecordsCopy =
ImmutableMap.copyOf(desiredRecords);
retrier.callWithRetry(() -> mutateZone(desiredRecordsCopy), ZoneStateException.class);
logger.atInfo().log("Wrote to Cloud DNS");
}
/** Returns the glue records for in-bailiwick nameservers for the given domain+records. */
private Stream<String> filterGlueRecords(String domainName, Stream<ResourceRecordSet> records) {
return records
.filter(record -> record.getType().equals("NS"))
.flatMap(record -> record.getRrdatas().stream())
.filter(hostName -> hostName.endsWith("." + domainName) && !hostName.equals(domainName));
}
/** Mutate the zone with the provided {@code desiredRecords}. */
@VisibleForTesting
void mutateZone(ImmutableMap<String, ImmutableSet<ResourceRecordSet>> desiredRecords) {
// Fetch all existing records for names that this writer is trying to modify
ImmutableSet.Builder<ResourceRecordSet> flattenedExistingRecords = new ImmutableSet.Builder<>();
// First, fetch the records for the given domains
Map<String, List<ResourceRecordSet>> domainRecords =
getResourceRecordsForDomains(desiredRecords.keySet());
// add the records to the list of existing records
domainRecords.values().forEach(flattenedExistingRecords::addAll);
// Get the glue record host names from the given records
ImmutableSet<String> hostsToRead =
domainRecords
.entrySet()
.stream()
.flatMap(entry -> filterGlueRecords(entry.getKey(), entry.getValue().stream()))
.collect(toImmutableSet());
// Then fetch and add the records for these hosts
getResourceRecordsForDomains(hostsToRead).values().forEach(flattenedExistingRecords::addAll);
// Flatten the desired records into one set.
ImmutableSet.Builder<ResourceRecordSet> flattenedDesiredRecords = new ImmutableSet.Builder<>();
desiredRecords.values().forEach(flattenedDesiredRecords::addAll);
// Delete all existing records and add back the desired records
updateResourceRecords(flattenedDesiredRecords.build(), flattenedExistingRecords.build());
}
/**
* Fetch the {@link ResourceRecordSet}s for the given domain names under this zone.
*
* <p>The provided domain should be in absolute form.
*/
private Map<String, List<ResourceRecordSet>> getResourceRecordsForDomains(
Set<String> domainNames) {
logger.atFine().log("Fetching records for %s", domainNames);
    // Per Concurrent.transform(): if numThreads < 2 or domainNames.size() < 2, it will not use
    // threading.
return ImmutableMap.copyOf(
Concurrent.transform(
domainNames,
numThreads,
domainName ->
new SimpleImmutableEntry<>(domainName, getResourceRecordsForDomain(domainName))));
}
/**
* Fetch the {@link ResourceRecordSet}s for the given domain name under this zone.
*
* <p>The provided domain should be in absolute form.
*/
private List<ResourceRecordSet> getResourceRecordsForDomain(String domainName) {
// TODO(b/70217860): do we want to use a retrier here?
try {
Dns.ResourceRecordSets.List listRecordsRequest =
dnsConnection.resourceRecordSets().list(projectId, zoneName).setName(domainName);
rateLimiter.acquire();
return listRecordsRequest.execute().getRrsets();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Update {@link ResourceRecordSet}s under this zone.
*
* <p>This call should be used in conjunction with {@link #getResourceRecordsForDomains} in a
* get-and-set retry loop.
*
   * <p>See <a href="https://cloud.google.com/dns/troubleshooting">the Cloud DNS troubleshooting
   * guide</a> for a list of errors produced by the Google Cloud DNS API.
*
   * @throws ZoneStateException if the operation could not be completed successfully because the
   *     records to delete do not exist, already exist, or have been modified with different
   *     attributes since being queried.
*/
private void updateResourceRecords(
ImmutableSet<ResourceRecordSet> additions, ImmutableSet<ResourceRecordSet> deletions) {
// Find records that are both in additions and deletions, so we can remove them from both before
// requesting the change. This is mostly for optimization reasons - not doing so doesn't affect
// the result.
ImmutableSet<ResourceRecordSet> intersection =
Sets.intersection(additions, deletions).immutableCopy();
logger.atInfo().log(
"There are %d common items out of the %d items in 'additions' and %d items in 'deletions'",
intersection.size(), additions.size(), deletions.size());
// Exit early if we have nothing to update - dnsConnection doesn't work on empty changes
if (additions.equals(deletions)) {
logger.atInfo().log("Returning early because additions is the same as deletions");
return;
}
Change change =
new Change()
.setAdditions(ImmutableList.copyOf(Sets.difference(additions, intersection)))
.setDeletions(ImmutableList.copyOf(Sets.difference(deletions, intersection)));
rateLimiter.acquire();
try {
dnsConnection.changes().create(projectId, zoneName, change).execute();
} catch (GoogleJsonResponseException e) {
GoogleJsonError err = e.getDetails();
// We did something really wrong here, just give up and re-throw
if (err == null || err.getErrors().size() > 1) {
throw new RuntimeException(e);
}
String errorReason = err.getErrors().get(0).getReason();
if (RETRYABLE_EXCEPTION_REASONS.contains(errorReason)) {
throw new ZoneStateException(errorReason);
} else {
throw new RuntimeException(e);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Returns the presentation format ending in a dot used for an absolute hostname.
*
* @param hostName the fully qualified hostname
*/
private static String getAbsoluteHostName(String hostName) {
return hostName.endsWith(".") ? hostName : hostName + ".";
}
/** Zone state on Cloud DNS does not match the expected state. */
static class ZoneStateException extends RuntimeException {
public ZoneStateException(String reason) {
super("Zone state on Cloud DNS does not match the expected state: " + reason);
}
}
}
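
The subtle part of mutateZone is the in-bailiwick test in filterGlueRecords: an NS target counts as glue only when it sits strictly below the domain being published. A minimal standalone sketch of that predicate follows; the domain and host names are hypothetical and the class is not part of the writer itself.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/** Minimal sketch of the in-bailiwick glue predicate from filterGlueRecords; names are hypothetical. */
public class GlueFilterDemo {

  // Keep only hosts strictly below domainName; out-of-zone targets and the domain itself are dropped.
  static Stream<String> filterGlue(String domainName, Stream<String> nsTargets) {
    return nsTargets.filter(
        hostName -> hostName.endsWith("." + domainName) && !hostName.equals(domainName));
  }

  public static void main(String[] args) {
    List<String> glue =
        filterGlue("example.tld.", Stream.of("ns1.example.tld.", "ns.other.tld."))
            .collect(Collectors.toList());
    System.out.println(glue); // prints [ns1.example.tld.]
  }
}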

View file

@ -0,0 +1,84 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.clouddns;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.services.dns.Dns;
import com.google.common.util.concurrent.RateLimiter;
import dagger.Binds;
import dagger.Module;
import dagger.Provides;
import dagger.multibindings.IntoMap;
import dagger.multibindings.IntoSet;
import dagger.multibindings.StringKey;
import google.registry.config.CredentialModule.DefaultCredential;
import google.registry.config.RegistryConfig.Config;
import google.registry.dns.writer.DnsWriter;
import java.util.Optional;
import javax.inject.Named;
/** Dagger module for Google Cloud DNS service connection objects. */
@Module
public abstract class CloudDnsWriterModule {
@Provides
static Dns provideDns(
@DefaultCredential GoogleCredential credential,
@Config("projectId") String projectId,
@Config("cloudDnsRootUrl") Optional<String> rootUrl,
@Config("cloudDnsServicePath") Optional<String> servicePath) {
Dns.Builder builder =
new Dns.Builder(credential.getTransport(), credential.getJsonFactory(), credential)
.setApplicationName(projectId);
rootUrl.ifPresent(builder::setRootUrl);
servicePath.ifPresent(builder::setServicePath);
return builder.build();
}
@Binds
@IntoMap
@StringKey(CloudDnsWriter.NAME)
abstract DnsWriter provideWriter(CloudDnsWriter writer);
@Provides
@IntoSet
@Named("dnsWriterNames")
static String provideWriterName() {
return CloudDnsWriter.NAME;
}
@Provides
@Named("cloudDns")
static RateLimiter provideRateLimiter() {
// This is the default max QPS for Cloud DNS. It can be increased by contacting the team
// via the Quotas page on the Cloud Console.
int cloudDnsMaxQps = 50;
return RateLimiter.create(cloudDnsMaxQps);
}
@Provides
@Named("cloudDnsNumThreads")
static int provideNumThreads() {
// TODO(b/70217860): find the "best" number of threads, taking into account running time, App
    // Engine constraints, and any Cloud DNS considerations, etc.
//
// NOTE: any number below 2 will not use threading at all.
return 10;
}
private CloudDnsWriterModule() {}
}
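
The rate limiter provided above is what keeps the writer under the default Cloud DNS quota: every list and change call first acquires a permit. A minimal sketch of the pacing behavior, assuming only Guava on the classpath:

import com.google.common.util.concurrent.RateLimiter;

/** Sketch: pacing callers at 50 QPS, as the writer does before each Cloud DNS request. */
public class RateLimiterDemo {
  public static void main(String[] args) {
    RateLimiter rateLimiter = RateLimiter.create(50.0); // 50 permits per second
    for (int i = 0; i < 5; i++) {
      double secondsWaited = rateLimiter.acquire(); // blocks until a permit is available
      System.out.printf("request %d waited %.3fs%n", i, secondsWaited);
    }
  }
}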

View file

@ -0,0 +1,21 @@
package(
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0
java_library(
name = "dnsupdate",
srcs = glob(["*.java"]),
deps = [
"//java/google/registry/config",
"//java/google/registry/dns/writer",
"//java/google/registry/model",
"//java/google/registry/util",
"@com_google_dagger",
"@com_google_guava",
"@dnsjava",
"@javax_inject",
"@joda_time",
],
)

View file

@ -0,0 +1,132 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.dnsupdate;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.Ints;
import google.registry.config.RegistryConfig.Config;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.nio.ByteBuffer;
import javax.inject.Inject;
import javax.net.SocketFactory;
import org.joda.time.Duration;
import org.xbill.DNS.Message;
import org.xbill.DNS.Opcode;
/**
* A transport for DNS messages. Sends/receives DNS messages over TCP using old-style {@link Socket}
* s and the message framing defined in <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>.
 * We would like to use the dnsjava library's {@link org.xbill.DNS.SimpleResolver} class for this, but
* it requires {@link java.nio.channels.SocketChannel} which is not supported on AppEngine.
*/
public class DnsMessageTransport {
/**
* Size of message length field for DNS TCP transport.
*
* @see <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>
*/
static final int MESSAGE_LENGTH_FIELD_BYTES = 2;
private static final int MESSAGE_MAXIMUM_LENGTH = (1 << (MESSAGE_LENGTH_FIELD_BYTES * 8)) - 1;
/**
* The standard DNS port number.
*
* @see <a href="https://tools.ietf.org/html/rfc1035">RFC 1035</a>
*/
@VisibleForTesting static final int DNS_PORT = 53;
private final SocketFactory factory;
private final String updateHost;
private final int updateTimeout;
/**
* Class constructor.
*
* @param factory a factory for TCP sockets
* @param updateHost host name of the DNS server
* @param updateTimeout update I/O timeout
*/
@Inject
public DnsMessageTransport(
SocketFactory factory,
@Config("dnsUpdateHost") String updateHost,
@Config("dnsUpdateTimeout") Duration updateTimeout) {
this.factory = factory;
this.updateHost = updateHost;
this.updateTimeout = Ints.checkedCast(updateTimeout.getMillis());
}
/**
* Sends a DNS "query" message (most likely an UPDATE) and returns the response. The response is
* checked for matching ID and opcode.
*
* @param query a message to send
* @return the response received from the server
   * @throws IOException if the socket's input/output streams throw one
* @throws IllegalArgumentException if the query is too large to be sent (> 65535 bytes)
*/
public Message send(Message query) throws IOException {
try (Socket socket = factory.createSocket(InetAddress.getByName(updateHost), DNS_PORT)) {
socket.setSoTimeout(updateTimeout);
writeMessage(socket.getOutputStream(), query);
Message response = readMessage(socket.getInputStream());
checkValidResponse(query, response);
return response;
}
}
private void checkValidResponse(Message query, Message response) {
verify(
response.getHeader().getID() == query.getHeader().getID(),
"response ID %s does not match query ID %s",
response.getHeader().getID(),
query.getHeader().getID());
verify(
response.getHeader().getOpcode() == query.getHeader().getOpcode(),
"response opcode '%s' does not match query opcode '%s'",
Opcode.string(response.getHeader().getOpcode()),
Opcode.string(query.getHeader().getOpcode()));
}
private void writeMessage(OutputStream outputStream, Message message) throws IOException {
byte[] messageData = message.toWire();
checkArgument(
messageData.length <= MESSAGE_MAXIMUM_LENGTH,
"DNS request message larger than maximum of %s: %s",
MESSAGE_MAXIMUM_LENGTH,
messageData.length);
ByteBuffer buffer = ByteBuffer.allocate(messageData.length + MESSAGE_LENGTH_FIELD_BYTES);
buffer.putShort((short) messageData.length);
buffer.put(messageData);
outputStream.write(buffer.array());
}
private Message readMessage(InputStream inputStream) throws IOException {
DataInputStream stream = new DataInputStream(inputStream);
int length = stream.readUnsignedShort();
byte[] messageData = new byte[length];
stream.readFully(messageData);
return new Message(messageData);
}
}
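
The framing that writeMessage and readMessage implement is just RFC 1035's TCP envelope: a big-endian two-byte length, then the raw message bytes. A self-contained sketch of the wire layout, where the payload bytes are hypothetical stand-ins for Message.toWire():

import java.nio.ByteBuffer;

/** Sketch of RFC 1035 TCP framing: two length bytes followed by the DNS message. */
public class DnsFramingDemo {
  public static void main(String[] args) {
    byte[] messageData = {(byte) 0xAB, (byte) 0xCD}; // stand-in for Message.toWire()
    ByteBuffer buffer = ByteBuffer.allocate(messageData.length + 2);
    buffer.putShort((short) messageData.length); // ByteBuffer is big-endian by default
    buffer.put(messageData);
    for (byte b : buffer.array()) {
      System.out.printf("%02X ", b); // prints: 00 02 AB CD
    }
  }
}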

View file

@ -0,0 +1,44 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.dnsupdate;
import dagger.Module;
import dagger.Provides;
import google.registry.config.RegistryConfig.Config;
import org.joda.time.Duration;
/** Dagger module that provides DNS configuration settings. */
@Module
public class DnsUpdateConfigModule {
/**
* Host that receives DNS updates from the registry.
* Usually a "hidden master" for the TLDs.
*/
@Provides
@Config("dnsUpdateHost")
public static String provideDnsUpdateHost() {
return "localhost";
}
/**
* Timeout on the socket for DNS update requests.
*/
@Provides
@Config("dnsUpdateTimeout")
public static Duration provideDnsUpdateTimeout() {
return Duration.standardSeconds(30);
}
}

View file

@ -0,0 +1,281 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.dnsupdate;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.Sets.intersection;
import static com.google.common.collect.Sets.union;
import static google.registry.model.EppResourceUtils.loadByForeignKey;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.net.InternetDomainName;
import google.registry.config.RegistryConfig.Config;
import google.registry.dns.writer.BaseDnsWriter;
import google.registry.dns.writer.DnsWriterZone;
import google.registry.model.domain.DomainBase;
import google.registry.model.domain.secdns.DelegationSignerData;
import google.registry.model.host.HostResource;
import google.registry.model.registry.Registries;
import google.registry.util.Clock;
import java.io.IOException;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.Optional;
import javax.inject.Inject;
import org.joda.time.Duration;
import org.xbill.DNS.AAAARecord;
import org.xbill.DNS.ARecord;
import org.xbill.DNS.DClass;
import org.xbill.DNS.DSRecord;
import org.xbill.DNS.Message;
import org.xbill.DNS.NSRecord;
import org.xbill.DNS.Name;
import org.xbill.DNS.RRset;
import org.xbill.DNS.Rcode;
import org.xbill.DNS.TextParseException;
import org.xbill.DNS.Type;
import org.xbill.DNS.Update;
/**
* A DnsWriter that implements the DNS UPDATE protocol as specified in
* <a href="https://tools.ietf.org/html/rfc2136">RFC 2136</a>. Publishes changes in the
* domain-registry to a (capable) external DNS server, sometimes called a "hidden master". DNS
* UPDATE messages are sent via a supplied "transport" class.
*
 * <p>On call to {@link #commit()}, a single UPDATE message is created containing the records required
* to "synchronize" the DNS with the current (at the time of processing) state of the registry, for
* the supplied domain/host.
*
* <p>The general strategy of the publish methods is to delete <em>all</em> resource records of any
* <em>type</em> that match the exact domain/host name supplied. And then for create/update cases,
* add any required records. Deleting all records of any type assumes that the registry is
* authoritative for all records for names in the zone. This seems appropriate for a TLD DNS server,
* which should only contain records required for proper DNS delegation.
*
 * <p>Only NS, DS, A, and AAAA records are published, and in particular no DNSSEC signing is done;
 * it is assumed that DNSSEC signing will be done by a third-party DNS provider.
*
* <p>Each commit call is treated as an atomic update to the DNS. If a commit fails an exception
* is thrown. The SOA record serial number is implicitly incremented by the server on each UPDATE
* message, as required by RFC 2136. Care must be taken to make sure the SOA serial number does not
* go backwards if the entire TLD (zone) is "reset" to empty and republished.
*/
public class DnsUpdateWriter extends BaseDnsWriter {
/**
   * The name of this DNS writer, as used in {@code Registry.dnsWriter}. Remember to change
* the value on affected Registry objects to prevent runtime failures.
*/
public static final String NAME = "DnsUpdateWriter";
private final Duration dnsDefaultATtl;
private final Duration dnsDefaultNsTtl;
private final Duration dnsDefaultDsTtl;
private final DnsMessageTransport transport;
private final Clock clock;
private final Update update;
private final String zoneName;
/**
* Class constructor.
*
* @param dnsDefaultATtl TTL used for any created resource records
* @param dnsDefaultNsTtl TTL used for any created nameserver records
* @param dnsDefaultDsTtl TTL used for any created DS records
* @param transport the transport used to send/receive the UPDATE messages
* @param clock a source of time
*/
@Inject
public DnsUpdateWriter(
@DnsWriterZone String zoneName,
@Config("dnsDefaultATtl") Duration dnsDefaultATtl,
@Config("dnsDefaultNsTtl") Duration dnsDefaultNsTtl,
@Config("dnsDefaultDsTtl") Duration dnsDefaultDsTtl,
DnsMessageTransport transport,
Clock clock) {
this.zoneName = zoneName;
this.update = new Update(toAbsoluteName(zoneName));
this.dnsDefaultATtl = dnsDefaultATtl;
this.dnsDefaultNsTtl = dnsDefaultNsTtl;
this.dnsDefaultDsTtl = dnsDefaultDsTtl;
this.transport = transport;
this.clock = clock;
}
/**
   * Publish the domain, while keeping track of which host refresh request triggered this domain
* refresh. Delete the requesting host in addition to all subordinate hosts.
*
* @param domainName the fully qualified domain name, with no trailing dot
* @param requestingHostName the fully qualified host name, with no trailing dot, that triggers
* this domain refresh request
*/
private void publishDomain(String domainName, String requestingHostName) {
Optional<DomainBase> domainOptional =
loadByForeignKey(DomainBase.class, domainName, clock.nowUtc());
update.delete(toAbsoluteName(domainName), Type.ANY);
// If the domain is now deleted, then don't update DNS for it.
if (domainOptional.isPresent()) {
DomainBase domain = domainOptional.get();
// As long as the domain exists, orphan glues should be cleaned.
deleteSubordinateHostAddressSet(domain, requestingHostName, update);
if (domain.shouldPublishToDns()) {
addInBailiwickNameServerSet(domain, update);
update.add(makeNameServerSet(domain));
update.add(makeDelegationSignerSet(domain));
}
}
}
@Override
public void publishDomain(String domainName) {
publishDomain(domainName, null);
}
@Override
public void publishHost(String hostName) {
// Get the superordinate domain name of the host.
InternetDomainName host = InternetDomainName.from(hostName);
ImmutableList<String> hostParts = host.parts();
Optional<InternetDomainName> tld = Registries.findTldForName(host);
    // Host not managed by our registry, no need to update DNS.
if (!tld.isPresent()) {
return;
}
ImmutableList<String> tldParts = tld.get().parts();
ImmutableList<String> domainParts =
hostParts.subList(hostParts.size() - tldParts.size() - 1, hostParts.size());
String domain = Joiner.on(".").join(domainParts);
// Refresh the superordinate domain, always delete the host first to ensure idempotency,
// and only publish the host if it is a glue record.
publishDomain(domain, hostName);
}
@Override
protected void commitUnchecked() {
try {
Message response = transport.send(update);
verify(
response.getRcode() == Rcode.NOERROR,
"DNS server failed domain update for '%s' rcode: %s",
zoneName,
Rcode.string(response.getRcode()));
} catch (IOException e) {
throw new RuntimeException("publishDomain failed for zone: " + zoneName, e);
}
}
private RRset makeDelegationSignerSet(DomainBase domain) {
RRset signerSet = new RRset();
for (DelegationSignerData signerData : domain.getDsData()) {
DSRecord dsRecord =
new DSRecord(
toAbsoluteName(domain.getFullyQualifiedDomainName()),
DClass.IN,
dnsDefaultDsTtl.getStandardSeconds(),
signerData.getKeyTag(),
signerData.getAlgorithm(),
signerData.getDigestType(),
signerData.getDigest());
signerSet.addRR(dsRecord);
}
return signerSet;
}
private void deleteSubordinateHostAddressSet(
DomainBase domain, String additionalHost, Update update) {
for (String hostName :
union(
domain.getSubordinateHosts(),
(additionalHost == null
? ImmutableSet.of()
: ImmutableSet.of(additionalHost)))) {
update.delete(toAbsoluteName(hostName), Type.ANY);
}
}
private void addInBailiwickNameServerSet(DomainBase domain, Update update) {
for (String hostName :
intersection(
domain.loadNameserverFullyQualifiedHostNames(), domain.getSubordinateHosts())) {
Optional<HostResource> host = loadByForeignKey(HostResource.class, hostName, clock.nowUtc());
checkState(host.isPresent(), "Host %s cannot be loaded", hostName);
update.add(makeAddressSet(host.get()));
update.add(makeV6AddressSet(host.get()));
}
}
private RRset makeNameServerSet(DomainBase domain) {
RRset nameServerSet = new RRset();
for (String hostName : domain.loadNameserverFullyQualifiedHostNames()) {
NSRecord record =
new NSRecord(
toAbsoluteName(domain.getFullyQualifiedDomainName()),
DClass.IN,
dnsDefaultNsTtl.getStandardSeconds(),
toAbsoluteName(hostName));
nameServerSet.addRR(record);
}
return nameServerSet;
}
private RRset makeAddressSet(HostResource host) {
RRset addressSet = new RRset();
for (InetAddress address : host.getInetAddresses()) {
if (address instanceof Inet4Address) {
ARecord record =
new ARecord(
toAbsoluteName(host.getFullyQualifiedHostName()),
DClass.IN,
dnsDefaultATtl.getStandardSeconds(),
address);
addressSet.addRR(record);
}
}
return addressSet;
}
private RRset makeV6AddressSet(HostResource host) {
RRset addressSet = new RRset();
for (InetAddress address : host.getInetAddresses()) {
if (address instanceof Inet6Address) {
AAAARecord record =
new AAAARecord(
toAbsoluteName(host.getFullyQualifiedHostName()),
DClass.IN,
dnsDefaultATtl.getStandardSeconds(),
address);
addressSet.addRR(record);
}
}
return addressSet;
}
private Name toAbsoluteName(String name) {
try {
return Name.fromString(name, Name.root);
} catch (TextParseException e) {
throw new RuntimeException(
String.format("toAbsoluteName failed for name: %s in zone: %s", name, zoneName), e);
}
}
}
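
The delete-all-then-add strategy described in the class javadoc maps directly onto dnsjava's Update message, which carries deletions and additions in a single atomic request. A minimal sketch with hypothetical zone and host names, assuming dnsjava on the classpath:

import org.xbill.DNS.DClass;
import org.xbill.DNS.NSRecord;
import org.xbill.DNS.Name;
import org.xbill.DNS.Type;
import org.xbill.DNS.Update;

/** Sketch of the delete-then-add UPDATE pattern; zone and host names are hypothetical. */
public class DnsUpdateDemo {
  public static void main(String[] args) throws Exception {
    Update update = new Update(Name.fromString("tld."));
    Name domain = Name.fromString("example.tld.");
    update.delete(domain, Type.ANY); // first wipe every record for the name
    update.add(new NSRecord(domain, DClass.IN, 180, Name.fromString("ns1.example.tld.")));
    System.out.println(update); // prints the UPDATE message in dig-style text
  }
}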

View file

@ -0,0 +1,48 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.dns.writer.dnsupdate;
import dagger.Module;
import dagger.Provides;
import dagger.multibindings.IntoMap;
import dagger.multibindings.IntoSet;
import dagger.multibindings.StringKey;
import google.registry.dns.writer.DnsWriter;
import javax.inject.Named;
import javax.net.SocketFactory;
/** Dagger module that provides a DnsUpdateWriter. */
@Module
public abstract class DnsUpdateWriterModule {
@Provides
static SocketFactory provideSocketFactory() {
return SocketFactory.getDefault();
}
@Provides
@IntoMap
@StringKey(DnsUpdateWriter.NAME)
static DnsWriter provideWriter(DnsUpdateWriter writer) {
return writer;
}
@Provides
@IntoSet
@Named("dnsWriterNames")
static String provideWriterName() {
return DnsUpdateWriter.NAME;
}
}
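
Together with the matching bindings in CloudDnsWriterModule, the @IntoMap/@StringKey declarations above assemble a Map<String, DnsWriter> keyed by writer name; a Registry's configured dnsWriter string then selects the implementation at runtime. A plain-Java sketch of that dispatch, using a simplified stand-in for the DnsWriter interface:

import java.util.Map;

/** Sketch: name-keyed writer dispatch, as assembled by the Dagger map multibindings. */
public class WriterDispatchDemo {

  /** Simplified stand-in for google.registry.dns.writer.DnsWriter. */
  interface DnsWriter {
    void publishDomain(String domainName);
  }

  public static void main(String[] args) {
    Map<String, DnsWriter> writers =
        Map.of("DnsUpdateWriter", domainName -> System.out.println("UPDATE for " + domainName));
    // A Registry entity stores the writer name; the publish task looks the writer up by that key.
    writers.get("DnsUpdateWriter").publishDomain("example.tld");
  }
}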

View file

@ -0,0 +1,70 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.documentation;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.util.Comparator.comparing;
import com.google.common.collect.ImmutableList;
import com.sun.javadoc.ClassDoc;
import com.sun.javadoc.RootDoc;
import google.registry.documentation.FlowDocumentation.ErrorCase;
import java.io.IOException;
import java.util.Arrays;
import java.util.stream.Stream;
/**
* Main entry point class for documentation generation. An instance of this class reads data
* via the javadoc system upon creation and stores it for answering future queries for
* documentation information.
*/
public final class DocumentationGenerator {
private final RootDoc sourceRoot;
/** Returns a new DocumentationGenerator object with parsed information from javadoc. */
public DocumentationGenerator() throws IOException {
sourceRoot = JavadocWrapper.getRootDoc();
}
/** Returns generated Markdown output for the flows. Convenience method for clients. */
public String generateMarkdown() {
return MarkdownDocumentationFormatter.generateMarkdownOutput(getFlowDocs());
}
/** Returns a list of flow documentation objects derived from this generator's data. */
public ImmutableList<FlowDocumentation> getFlowDocs() {
// Relevant flows are leaf flows: precisely the concrete subclasses of Flow.
return getConcreteSubclassesStream(FlowDocumentation.BASE_FLOW_CLASS_NAME)
.sorted(comparing(ClassDoc::typeName))
.map(FlowDocumentation::new)
.collect(toImmutableList());
}
/** Returns a list of all possible error cases that might occur. */
public ImmutableList<ErrorCase> getAllErrors() {
// Relevant error cases are precisely the concrete subclasses of EppException.
return getConcreteSubclassesStream(FlowDocumentation.EXCEPTION_CLASS_NAME)
.map(ErrorCase::new)
.collect(toImmutableList());
}
/** Helper to return all concrete subclasses of a given named class. */
private Stream<ClassDoc> getConcreteSubclassesStream(String baseClassName) {
final ClassDoc baseFlowClassDoc = sourceRoot.classNamed(baseClassName);
return Arrays.stream(sourceRoot.classes())
.filter(classDoc -> classDoc.subclassOf(baseFlowClassDoc) && !classDoc.isAbstract());
}
}

View file

@ -0,0 +1,275 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.documentation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.MoreCollectors.onlyElement;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimaps;
import com.sun.javadoc.AnnotationDesc;
import com.sun.javadoc.ClassDoc;
import com.sun.javadoc.FieldDoc;
import com.sun.javadoc.SeeTag;
import com.sun.javadoc.Tag;
import google.registry.model.eppoutput.Result.Code;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.stream.Stream;
import javax.annotation.Nullable;
/**
* Class to represent documentation information for a single EPP flow.
*
 * <p>The getFlowDocs() method on DocumentationGenerator returns a list of FlowDocumentation
* instances corresponding to the leaf flows in the flows package, constructing the instances
* from class information returned from the javadoc system. Each instance has methods for
* retrieving relevant information about the flow, such as a description, error conditions, etc.
*/
public class FlowDocumentation {
/** Constants for names of various relevant packages and classes. */
static final String FLOW_PACKAGE_NAME = "google.registry.flows";
static final String BASE_FLOW_CLASS_NAME = FLOW_PACKAGE_NAME + ".Flow";
static final String EXCEPTION_CLASS_NAME = FLOW_PACKAGE_NAME + ".EppException";
static final String CODE_ANNOTATION_NAME = EXCEPTION_CLASS_NAME + ".EppResultCode";
/** Name of the class for this flow. */
private final String name;
/** Fully qualified name of the class for this flow. */
private final String qualifiedName;
/** Name of the package in which this flow resides. */
private final String packageName;
/** Class docs for the flow. */
private final String classDocs;
/** Javadoc-tagged error conditions for this flow in list form. */
private final List<ErrorCase> errors;
/** Javadoc-tagged error conditions for this flow, organized by underlying error code. */
private final ListMultimap<Long, ErrorCase> errorsByCode;
/**
* Creates a FlowDocumentation for this flow class using data from javadoc tags. Not public
* because clients should get FlowDocumentation objects via the DocumentationGenerator class.
*/
protected FlowDocumentation(ClassDoc flowDoc) {
name = flowDoc.name();
qualifiedName = flowDoc.qualifiedName();
packageName = flowDoc.containingPackage().name();
classDocs = flowDoc.commentText();
errors = new ArrayList<>();
// Store error codes in sorted order, and leave reasons in insert order.
errorsByCode =
Multimaps.newListMultimap(new TreeMap<Long, Collection<ErrorCase>>(), ArrayList::new);
parseTags(flowDoc);
}
public String getName() {
return name;
}
public String getQualifiedName() {
return qualifiedName;
}
public String getPackageName() {
return packageName;
}
public String getClassDocs() {
return classDocs;
}
public ImmutableList<ErrorCase> getErrors() {
return ImmutableList.copyOf(errors);
}
public ImmutableMultimap<Long, ErrorCase> getErrorsByCode() {
return ImmutableMultimap.copyOf(errorsByCode);
}
/** Iterates through javadoc tags on the underlying class and calls specific parsing methods. */
private void parseTags(ClassDoc flowDoc) {
for (Tag tag : flowDoc.tags()) {
      // Only @error tags are relevant here; everything else is ignored.
      if ("@error".equals(tag.name())) {
        parseErrorTag(tag);
      }
}
}
/** Exception to throw when an @error tag cannot be parsed correctly. */
private static class BadErrorTagFormatException extends IllegalStateException {
/** Makes a message to use as a prefix for the reason passed up to the superclass. */
private static String makeMessage(String reason, Tag tag) {
return String.format("Bad @error tag format at %s - %s", tag.position(), reason);
}
private BadErrorTagFormatException(String reason, Tag tag) {
super(makeMessage(reason, tag));
}
private BadErrorTagFormatException(String reason, Tag tag, Exception cause) {
super(makeMessage(reason, tag), cause);
}
}
/** Parses a javadoc tag corresponding to an error case and updates the error mapping. */
private void parseErrorTag(Tag tag) {
// Parse the @error tag text to find the @link inline tag.
SeeTag linkedTag;
try {
linkedTag =
Stream.of(tag.inlineTags())
.filter(SeeTag.class::isInstance)
.map(SeeTag.class::cast)
.collect(onlyElement());
} catch (NoSuchElementException | IllegalArgumentException e) {
throw new BadErrorTagFormatException(
String.format("expected one @link tag in tag text but found %s: %s",
(e instanceof NoSuchElementException ? "none" : "multiple"),
tag.text()),
tag, e);
}
// Check to see if the @link tag references a valid class.
ClassDoc exceptionRef = linkedTag.referencedClass();
if (exceptionRef == null) {
throw new BadErrorTagFormatException(
"could not resolve class from @link tag text: " + linkedTag.text(),
tag);
}
// Try to convert the referenced class into an ErrorCase; fail if it's not an EppException.
ErrorCase error;
try {
error = new ErrorCase(exceptionRef);
} catch (IllegalStateException | IllegalArgumentException e) {
throw new BadErrorTagFormatException(
"class referenced in @link is not a valid EppException: " + exceptionRef.qualifiedName(),
tag, e);
}
// Success; store this as a parsed error case.
errors.add(error);
errorsByCode.put(error.getCode(), error);
}
/**
* Represents an error case for a flow, with a reason for the error and the EPP error code.
*
* <p>This class is an immutable wrapper for the name of an EppException subclass that gets
* thrown to indicate an error condition. It overrides equals() and hashCode() so that
* instances of this class can be used in collections in the normal fashion.
*/
public static class ErrorCase {
/** The non-qualified name of the exception class. */
private final String name;
/** The fully-qualified name of the exception class. */
private final String className;
/** The reason this error was thrown, normally documented on the low-level exception class. */
private final String reason;
/** The EPP error code value corresponding to this error condition. */
private final long errorCode;
/** Constructs an ErrorCase from the corresponding class for a low-level flow exception. */
protected ErrorCase(ClassDoc exceptionDoc) {
name = exceptionDoc.name();
className = exceptionDoc.qualifiedName();
// The javadoc comment on the class explains the reason for the error condition.
reason = exceptionDoc.commentText();
ClassDoc highLevelExceptionDoc = getHighLevelExceptionFrom(exceptionDoc);
errorCode = extractErrorCode(highLevelExceptionDoc);
checkArgument(!exceptionDoc.isAbstract(),
"Cannot use an abstract subclass of EppException as an error case");
}
public String getName() {
return name;
}
protected String getClassName() {
return className;
}
public String getReason() {
return reason;
}
public long getCode() {
return errorCode;
}
/** Returns the direct subclass of EppException that this class is a subclass of (or is). */
private ClassDoc getHighLevelExceptionFrom(ClassDoc exceptionDoc) {
// While we're not yet at the root, move up the class hierarchy looking for EppException.
while (exceptionDoc.superclass() != null) {
if (exceptionDoc.superclass().qualifiedTypeName().equals(EXCEPTION_CLASS_NAME)) {
return exceptionDoc;
}
exceptionDoc = exceptionDoc.superclass();
}
// Failure; we reached the root without finding a subclass of EppException.
throw new IllegalArgumentException(
String.format("Class referenced is not a subclass of %s", EXCEPTION_CLASS_NAME));
}
/** Returns the corresponding EPP error code for an annotated subclass of EppException. */
private long extractErrorCode(ClassDoc exceptionDoc) {
try {
// We're looking for a specific annotation by name that should appear only once.
AnnotationDesc errorCodeAnnotation =
Arrays.stream(exceptionDoc.annotations())
.filter(
anno -> anno.annotationType().qualifiedTypeName().equals(CODE_ANNOTATION_NAME))
.findFirst()
.get();
// The annotation should have one element whose value converts to an EppResult.Code.
AnnotationDesc.ElementValuePair pair = errorCodeAnnotation.elementValues()[0];
String enumConstant = ((FieldDoc) pair.value().value()).name();
return Code.valueOf(enumConstant).code;
} catch (IllegalStateException e) {
throw new IllegalStateException(
"No error code annotation found on exception " + exceptionDoc.name(), e);
} catch (ArrayIndexOutOfBoundsException | ClassCastException | IllegalArgumentException e) {
throw new IllegalStateException("Bad annotation on exception " + exceptionDoc.name(), e);
}
}
@Override
public boolean equals(@Nullable Object object) {
// The className field canonically identifies the EppException wrapped by this class, and
// all other instance state is derived from that exception, so we only check className.
return object instanceof ErrorCase && this.className.equals(((ErrorCase) object).className);
}
@Override
public int hashCode() {
// See note for equals() - only className is needed for comparisons.
return className.hashCode();
}
}
}
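
parseErrorTag above expects each @error tag's text to contain exactly one inline {@link} that resolves to a concrete EppException subclass. A self-contained sketch of the javadoc convention it consumes; the flow and exception names are hypothetical:

/** Sketch of the @error javadoc convention parsed by FlowDocumentation; all names are hypothetical. */
public class ErrorTagDemo {

  /** Stand-in for a concrete EppException subclass. */
  static class WidgetAlreadyExistsException extends RuntimeException {}

  /**
   * An EPP flow that creates a widget.
   *
   * @error {@link WidgetAlreadyExistsException}
   */
  static class WidgetCreateFlow {}

  public static void main(String[] args) {
    System.out.println("Each @error tag above becomes one ErrorCase, keyed by its EPP result code.");
  }
}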

View file

@ -0,0 +1,90 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.documentation;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.Parameters;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
/**
* Tool to generate documentation for the EPP flows and corresponding external API.
*
 * <p>Mostly responsible for producing standalone documentation files (HTML
 * and Markdown) from flow information objects, which in turn call into javadoc
 * to extract documentation information from the flows package source files.
* See the {@link FlowDocumentation} class for more details.
*/
@Parameters(separators = " =", commandDescription = "Tool to generate EPP API documentation")
public class FlowDocumentationTool {
@Parameter(names = {"-o", "--output_file"},
description = "file where generated documentation will be written (use '-' for stdout)")
private String outputFileName;
@Parameter(names = {"--help", "--helpshort"}, description = "print this help", help = true)
private boolean displayHelp = false;
/** Parses command line flags and then runs the documentation tool. */
public static void main(String[] args) {
FlowDocumentationTool docTool = new FlowDocumentationTool();
JCommander jcommander = new JCommander(docTool);
jcommander.setProgramName("flow_docs_tool");
try {
jcommander.parse(args);
} catch (ParameterException e) {
jcommander.usage();
throw e;
}
if (docTool.displayHelp) {
jcommander.usage();
return;
}
docTool.run();
}
/** Generates flow documentation and then outputs it to the specified file. */
public void run() {
DocumentationGenerator docGenerator;
try {
docGenerator = new DocumentationGenerator();
} catch (IOException e) {
throw new RuntimeException("IO error while running Javadoc tool", e);
}
String output = docGenerator.generateMarkdown();
    if (outputFileName == null) {
      // Default the file name before dereferencing it, so a missing flag cannot cause an NPE.
      outputFileName = "doclet.html";
    }
    if (outputFileName.equals("-")) {
      System.out.println(output);
    } else {
try {
Files.asCharSink(new File(outputFileName), UTF_8).write(output);
} catch (IOException e) {
throw new RuntimeException("Could not write to specified output file", e);
}
}
}
}
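
A quick usage sketch: because the @Parameters annotation sets separators = " =", the output flag may be written either as --output_file=flows.md or --output_file flows.md. The output path below is hypothetical, and the class assumes it is compiled alongside FlowDocumentationTool:

/** Sketch: driving the documentation tool programmatically; the output path is hypothetical. */
public class FlowDocsToolDemo {
  public static void main(String[] args) {
    // Equivalent to running: flow_docs_tool --output_file=flows.md
    FlowDocumentationTool.main(new String[] {"--output_file=flows.md"});
  }
}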

Some files were not shown because too many files have changed in this diff.