Remove CommitLog and MapReduce-related code (#1670)

This commit is contained in:
gbrodman 2022-06-23 12:54:47 -04:00 committed by GitHub
parent 329b4b2735
commit 9b65f7109b
264 changed files with 1834 additions and 16033 deletions

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -11,13 +11,13 @@ com.google.api.grpc:proto-google-iam-v1:1.3.4
com.google.api:api-common:2.2.0
com.google.api:gax-httpjson:0.103.1
com.google.api:gax:2.18.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.auth:google-auth-library-credentials:1.7.0
com.google.auth:google-auth-library-oauth2-http:1.7.0
com.google.auto.value:auto-value-annotations:1.9
com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6

View file

@ -196,7 +196,6 @@ dependencies {
compile deps['com.google.apis:google-api-services-storage']
testCompile deps['com.google.appengine:appengine-api-stubs']
compile deps['com.google.appengine.tools:appengine-gcs-client']
compile deps['com.google.appengine.tools:appengine-mapreduce']
compile deps['com.google.appengine.tools:appengine-pipeline']
compile deps['com.google.appengine:appengine-remote-api']
compile deps['com.google.auth:google-auth-library-credentials']

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -115,7 +113,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -123,7 +121,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -133,28 +131,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -211,7 +208,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -263,7 +259,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -280,13 +275,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:slf4j-api:1.7.36
org.springframework:spring-core:5.3.18

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -27,11 +26,11 @@ com.github.jnr:jnr-ffi:2.2.11
com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -84,10 +83,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -102,7 +100,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -114,7 +112,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -122,7 +120,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -132,28 +130,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -206,7 +203,6 @@ io.opencensus:opencensus-exporter-stats-stackdriver:0.31.0
io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -256,7 +252,6 @@ org.checkerframework:checker-qual:3.22.0
org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -272,13 +267,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:slf4j-api:1.7.36
org.springframework:spring-core:5.3.18

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,8 +101,8 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:postgres-socket-factory:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud.sql:postgres-socket-factory:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -116,7 +114,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -124,7 +122,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -134,28 +132,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -219,7 +216,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -273,7 +269,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -290,13 +285,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,8 +101,8 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:postgres-socket-factory:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud.sql:postgres-socket-factory:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -116,7 +114,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -124,7 +122,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -134,28 +132,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -219,7 +216,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -272,7 +268,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -288,13 +283,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,7 @@
com.sun.activation:jakarta.activation:1.2.2
com.sun.activation:javax.activation:1.2.0
com.sun.xml.bind:jaxb-impl:2.3.3
com.sun.xml.bind:jaxb-osgi:4.0.0
com.sun.xml.bind:jaxb-osgi:4.0.0-M4
com.sun.xml.bind:jaxb-xjc:2.3.3
jakarta.activation:jakarta.activation-api:2.1.0
jakarta.xml.bind:jakarta.xml.bind-api:4.0.0

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -115,7 +113,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -123,7 +121,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -133,28 +131,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -211,7 +208,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -263,7 +259,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -280,13 +275,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:slf4j-api:1.7.36
org.springframework:spring-core:5.3.18

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -27,11 +26,11 @@ com.github.jnr:jnr-ffi:2.2.11
com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -84,10 +83,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -102,7 +100,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -114,7 +112,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -122,7 +120,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -132,28 +130,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -206,7 +203,6 @@ io.opencensus:opencensus-exporter-stats-stackdriver:0.31.0
io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -257,7 +253,6 @@ org.checkerframework:checker-qual:3.22.0
org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -274,13 +269,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:slf4j-api:1.7.36
org.springframework:spring-core:5.3.18

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -115,7 +113,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -123,7 +121,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -133,28 +131,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -218,7 +215,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -272,7 +268,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -289,13 +284,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -115,7 +113,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -123,7 +121,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -133,28 +131,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -218,7 +215,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -272,7 +268,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -289,13 +284,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -115,7 +113,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -123,7 +121,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -133,28 +131,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -218,7 +215,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -272,7 +268,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -289,13 +284,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-remote-api:2.0.5
@ -103,8 +101,8 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:postgres-socket-factory:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud.sql:postgres-socket-factory:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -116,7 +114,7 @@ com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -124,7 +122,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -134,28 +132,27 @@ com.google.guava:failureaccess:1.0.1
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -219,7 +216,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -272,7 +268,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -288,13 +283,12 @@ org.joda:joda-money:1.0.1
org.json:json:20200518
org.jsoup:jsoup:1.15.1
org.jvnet.staxex:stax-ex:1.8
org.objenesis:objenesis:1.2
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.slf4j:jcl-over-slf4j:1.7.30
org.slf4j:jul-to-slf4j:1.7.30

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-api-stubs:2.0.5
@ -104,7 +102,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -112,12 +110,12 @@ com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-firestore:3.0.14
com.google.cloud:google-cloud-monitoring:1.82.0
com.google.cloud:google-cloud-nio:0.124.4
com.google.cloud:google-cloud-nio:0.124.2
com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -125,7 +123,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -136,12 +134,12 @@ com.google.guava:guava-testlib:31.1-jre
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
@ -149,18 +147,17 @@ com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:contrib:1.0.7
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.google.truth.extensions:truth-java8-extension:1.1.3
com.google.truth:truth:1.1.3
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -220,7 +217,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -273,7 +269,7 @@ org.apache.mina:mina-core:2.1.6
org.apache.sshd:sshd-core:2.0.0
org.apache.sshd:sshd-scp:2.0.0
org.apache.sshd:sshd-sftp:2.0.0
org.apache.tomcat:tomcat-annotations-api:10.1.0-M16
org.apache.tomcat:tomcat-annotations-api:10.1.0-M15
org.bouncycastle:bcpg-jdk15on:1.67
org.bouncycastle:bcpkix-jdk15on:1.67
org.bouncycastle:bcprov-jdk15on:1.67
@ -283,7 +279,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -324,7 +319,7 @@ org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.seleniumhq.selenium:selenium-api:3.141.59
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -27,11 +26,11 @@ com.github.jnr:jnr-ffi:2.2.11
com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -84,10 +83,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-api-stubs:2.0.5
@ -103,7 +101,7 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -111,12 +109,12 @@ com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-firestore:3.0.14
com.google.cloud:google-cloud-monitoring:1.82.0
com.google.cloud:google-cloud-nio:0.124.4
com.google.cloud:google-cloud-nio:0.124.2
com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -124,7 +122,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -135,12 +133,12 @@ com.google.guava:guava-testlib:31.1-jre
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
@ -148,18 +146,17 @@ com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:contrib:1.0.7
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.google.truth.extensions:truth-java8-extension:1.1.3
com.google.truth:truth:1.1.3
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -215,7 +212,6 @@ io.opencensus:opencensus-exporter-stats-stackdriver:0.31.0
io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -268,7 +264,7 @@ org.apache.mina:mina-core:2.1.6
org.apache.sshd:sshd-core:2.0.0
org.apache.sshd:sshd-scp:2.0.0
org.apache.sshd:sshd-sftp:2.0.0
org.apache.tomcat:tomcat-annotations-api:10.1.0-M16
org.apache.tomcat:tomcat-annotations-api:10.1.0-M15
org.apiguardian:apiguardian-api:1.1.2
org.bouncycastle:bcpg-jdk15on:1.67
org.bouncycastle:bcpkix-jdk15on:1.67
@ -278,7 +274,6 @@ org.checkerframework:checker-qual:3.22.0
org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -311,14 +306,13 @@ org.mockito:mockito-core:4.6.1
org.mockito:mockito-junit-jupiter:4.6.1
org.mortbay.jetty:jetty-util:6.1.26
org.mortbay.jetty:jetty:6.1.26
org.objenesis:objenesis:1.2
org.opentest4j:opentest4j:1.2.0
org.ow2.asm:asm-analysis:9.3
org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.seleniumhq.selenium:selenium-api:3.141.59
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-api-stubs:2.0.5
@ -104,8 +102,8 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:postgres-socket-factory:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud.sql:postgres-socket-factory:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -113,12 +111,12 @@ com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-firestore:3.0.14
com.google.cloud:google-cloud-monitoring:1.82.0
com.google.cloud:google-cloud-nio:0.124.4
com.google.cloud:google-cloud-nio:0.124.2
com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -126,7 +124,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -137,12 +135,12 @@ com.google.guava:guava-testlib:31.1-jre
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
@ -150,18 +148,17 @@ com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:contrib:1.0.7
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.google.truth.extensions:truth-java8-extension:1.1.3
com.google.truth:truth:1.1.3
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -229,7 +226,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -283,7 +279,7 @@ org.apache.mina:mina-core:2.1.6
org.apache.sshd:sshd-core:2.0.0
org.apache.sshd:sshd-scp:2.0.0
org.apache.sshd:sshd-sftp:2.0.0
org.apache.tomcat:tomcat-annotations-api:10.1.0-M16
org.apache.tomcat:tomcat-annotations-api:10.1.0-M15
org.bouncycastle:bcpg-jdk15on:1.67
org.bouncycastle:bcpkix-jdk15on:1.67
org.bouncycastle:bcprov-jdk15on:1.67
@ -293,7 +289,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -334,7 +329,7 @@ org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.seleniumhq.selenium:selenium-api:3.141.59
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59

View file

@ -4,7 +4,6 @@
antlr:antlr:2.7.7
aopalliance:aopalliance:1.0
args4j:args4j:2.0.23
cglib:cglib-nodep:2.2
com.101tec:zkclient:0.10
com.beust:jcommander:1.60
com.fasterxml.jackson.core:jackson-annotations:2.13.3
@ -28,11 +27,11 @@ com.github.jnr:jnr-posix:3.1.15
com.github.jnr:jnr-unixsocket:0.38.17
com.github.jnr:jnr-x86asm:1.0.2
com.google.android:annotations:4.1.1.4
com.google.api-client:google-api-client-appengine:1.35.1
com.google.api-client:google-api-client-appengine:1.35.0
com.google.api-client:google-api-client-jackson2:1.32.2
com.google.api-client:google-api-client-java6:1.35.1
com.google.api-client:google-api-client-servlet:1.35.1
com.google.api-client:google-api-client:1.35.1
com.google.api-client:google-api-client-java6:1.35.0
com.google.api-client:google-api-client-servlet:1.35.0
com.google.api-client:google-api-client:1.35.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1:2.10.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta1:0.134.0
com.google.api.grpc:grpc-google-cloud-bigquerystorage-v1beta2:0.134.0
@ -85,10 +84,9 @@ com.google.apis:google-api-services-iamcredentials:v1-rev20210326-1.32.1
com.google.apis:google-api-services-monitoring:v3-rev20220525-1.32.1
com.google.apis:google-api-services-pubsub:v1-rev20211130-1.32.1
com.google.apis:google-api-services-sheets:v4-rev20220411-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220513-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220604-1.32.1
com.google.apis:google-api-services-sqladmin:v1beta4-rev20220323-1.32.1
com.google.apis:google-api-services-storage:v1-rev20220509-1.32.1
com.google.appengine.tools:appengine-gcs-client:0.8.1
com.google.appengine.tools:appengine-mapreduce:0.9
com.google.appengine.tools:appengine-pipeline:0.2.13
com.google.appengine:appengine-api-1.0-sdk:2.0.5
com.google.appengine:appengine-api-stubs:2.0.5
@ -104,8 +102,8 @@ com.google.cloud.bigdataoss:util:2.2.6
com.google.cloud.bigtable:bigtable-client-core:1.26.3
com.google.cloud.bigtable:bigtable-metrics-api:1.26.3
com.google.cloud.datastore:datastore-v1-proto-client:2.1.3
com.google.cloud.sql:jdbc-socket-factory-core:1.6.1
com.google.cloud.sql:postgres-socket-factory:1.6.1
com.google.cloud.sql:jdbc-socket-factory-core:1.6.0
com.google.cloud.sql:postgres-socket-factory:1.6.0
com.google.cloud:google-cloud-bigquerystorage:2.10.0
com.google.cloud:google-cloud-bigtable:2.5.3
com.google.cloud:google-cloud-core-grpc:2.4.0
@ -113,12 +111,12 @@ com.google.cloud:google-cloud-core-http:2.7.1
com.google.cloud:google-cloud-core:2.7.1
com.google.cloud:google-cloud-firestore:3.0.14
com.google.cloud:google-cloud-monitoring:1.82.0
com.google.cloud:google-cloud-nio:0.124.4
com.google.cloud:google-cloud-nio:0.124.2
com.google.cloud:google-cloud-pubsub:1.116.0
com.google.cloud:google-cloud-pubsublite:1.5.0
com.google.cloud:google-cloud-secretmanager:2.2.0
com.google.cloud:google-cloud-spanner:6.20.0
com.google.cloud:google-cloud-storage:2.8.0
com.google.cloud:google-cloud-storage:2.7.2
com.google.cloud:google-cloud-tasks:2.2.0
com.google.cloud:grpc-gcp:1.1.0
com.google.cloud:proto-google-cloud-firestore-bundle-v1:3.0.14
@ -126,7 +124,7 @@ com.google.code.findbugs:jsr305:3.0.2
com.google.code.gson:gson:2.9.0
com.google.common.html.types:types:1.0.6
com.google.dagger:dagger:2.42
com.google.errorprone:error_prone_annotations:2.14.0
com.google.errorprone:error_prone_annotations:2.13.1
com.google.escapevelocity:escapevelocity:0.9.1
com.google.flatbuffers:flatbuffers-java:1.12.0
com.google.flogger:flogger-system-backend:0.7.4
@ -137,12 +135,12 @@ com.google.guava:guava-testlib:31.1-jre
com.google.guava:guava:31.1-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.google.gwt:gwt-user:2.9.0
com.google.http-client:google-http-client-apache-v2:1.42.0
com.google.http-client:google-http-client-appengine:1.42.0
com.google.http-client:google-http-client-gson:1.42.0
com.google.http-client:google-http-client-apache-v2:1.41.8
com.google.http-client:google-http-client-appengine:1.41.8
com.google.http-client:google-http-client-gson:1.41.8
com.google.http-client:google-http-client-jackson2:1.41.8
com.google.http-client:google-http-client-protobuf:1.40.1
com.google.http-client:google-http-client:1.42.0
com.google.http-client:google-http-client:1.41.8
com.google.inject.extensions:guice-multibindings:4.1.0
com.google.inject:guice:4.1.0
com.google.j2objc:j2objc-annotations:1.3
@ -150,18 +148,17 @@ com.google.jsinterop:jsinterop-annotations:2.0.0
com.google.monitoring-client:contrib:1.0.7
com.google.monitoring-client:metrics:1.0.7
com.google.monitoring-client:stackdriver:1.0.7
com.google.oauth-client:google-oauth-client-appengine:1.34.1
com.google.oauth-client:google-oauth-client-java6:1.34.1
com.google.oauth-client:google-oauth-client-jetty:1.34.1
com.google.oauth-client:google-oauth-client-servlet:1.34.1
com.google.oauth-client:google-oauth-client:1.34.1
com.google.oauth-client:google-oauth-client-appengine:1.34.0
com.google.oauth-client:google-oauth-client-java6:1.34.0
com.google.oauth-client:google-oauth-client-jetty:1.34.0
com.google.oauth-client:google-oauth-client-servlet:1.34.0
com.google.oauth-client:google-oauth-client:1.34.0
com.google.protobuf:protobuf-java-util:3.20.1
com.google.protobuf:protobuf-java:3.20.1
com.google.re2j:re2j:1.6
com.google.template:soy:2021-02-01
com.google.truth.extensions:truth-java8-extension:1.1.3
com.google.truth:truth:1.1.3
com.googlecode.charts4j:charts4j:1.3
com.googlecode.json-simple:json-simple:1.1.1
com.ibm.icu:icu4j:71.1
com.jcraft:jsch:0.1.55
@ -229,7 +226,6 @@ io.opencensus:opencensus-impl-core:0.31.0
io.opencensus:opencensus-impl:0.31.0
io.opencensus:opencensus-proto:0.2.0
io.perfmark:perfmark-api:0.25.0
it.unimi.dsi:fastutil:6.5.16
javax.activation:activation:1.1
javax.activation:javax.activation-api:1.2.0
javax.annotation:javax.annotation-api:1.3.2
@ -283,7 +279,7 @@ org.apache.mina:mina-core:2.1.6
org.apache.sshd:sshd-core:2.0.0
org.apache.sshd:sshd-scp:2.0.0
org.apache.sshd:sshd-sftp:2.0.0
org.apache.tomcat:tomcat-annotations-api:10.1.0-M16
org.apache.tomcat:tomcat-annotations-api:10.1.0-M15
org.bouncycastle:bcpg-jdk15on:1.67
org.bouncycastle:bcpkix-jdk15on:1.67
org.bouncycastle:bcprov-jdk15on:1.67
@ -293,7 +289,6 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
org.codehaus.jackson:jackson-mapper-asl:1.9.13
org.codehaus.mojo:animal-sniffer-annotations:1.21
org.conscrypt:conscrypt-openjdk-uber:2.5.1
org.easymock:easymock:3.0
org.flywaydb:flyway-core:8.5.12
org.glassfish.jaxb:jaxb-runtime:2.3.1
org.glassfish.jaxb:txw2:2.3.1
@ -334,7 +329,7 @@ org.ow2.asm:asm-commons:9.2
org.ow2.asm:asm-tree:9.3
org.ow2.asm:asm-util:9.3
org.ow2.asm:asm:9.3
org.postgresql:postgresql:42.4.0
org.postgresql:postgresql:42.3.6
org.rnorth.duct-tape:duct-tape:1.0.8
org.seleniumhq.selenium:selenium-api:3.141.59
org.seleniumhq.selenium:selenium-chrome-driver:3.141.59

View file

@ -1,111 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.appengine.api.ThreadManager.currentRequestThreadFactory;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
import static google.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.RestoreCommitLogsAction.BUCKET_OVERRIDE_PARAM;
import static google.registry.backup.RestoreCommitLogsAction.FROM_TIME_PARAM;
import static google.registry.backup.RestoreCommitLogsAction.TO_TIME_PARAM;
import static google.registry.request.RequestParameters.extractOptionalParameter;
import static google.registry.request.RequestParameters.extractRequiredDatetimeParameter;
import static google.registry.request.RequestParameters.extractRequiredParameter;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ListeningExecutorService;
import dagger.Module;
import dagger.Provides;
import google.registry.cron.CommitLogFanoutAction;
import google.registry.request.HttpException.BadRequestException;
import google.registry.request.Parameter;
import java.lang.annotation.Documented;
import java.util.Optional;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import javax.inject.Qualifier;
import javax.servlet.http.HttpServletRequest;
import org.joda.time.DateTime;
/**
 * Dagger module for the backup package.
 *
 * <p>Binds HTTP request parameters and executor services used by the commit-log backup,
 * export, and restore actions.
 *
 * @see "google.registry.module.backend.BackendComponent"
 */
@Module
public final class BackupModule {

  /** Dagger qualifier for backups. */
  @Qualifier
  @Documented
  public @interface Backups {}

  /** Number of threads in the threaded executor. */
  private static final int NUM_THREADS = 10;

  /** Parses the commit log bucket id from the request, rejecting non-numeric values. */
  @Provides
  @Parameter("bucket")
  static int provideBucket(HttpServletRequest req) {
    String rawBucket = extractRequiredParameter(req, CommitLogFanoutAction.BUCKET_PARAM);
    Integer bucketId = Ints.tryParse(rawBucket);
    if (bucketId == null) {
      throw new BadRequestException("Bad bucket id");
    }
    return bucketId;
  }

  /** Provides the lower checkpoint bound for a commit log diff export. */
  @Provides
  @Parameter(LOWER_CHECKPOINT_TIME_PARAM)
  static DateTime provideLowerCheckpointKey(HttpServletRequest req) {
    return extractRequiredDatetimeParameter(req, LOWER_CHECKPOINT_TIME_PARAM);
  }

  /** Provides the upper checkpoint bound for a commit log diff export. */
  @Provides
  @Parameter(UPPER_CHECKPOINT_TIME_PARAM)
  static DateTime provideUpperCheckpointKey(HttpServletRequest req) {
    return extractRequiredDatetimeParameter(req, UPPER_CHECKPOINT_TIME_PARAM);
  }

  /** Provides the optional GCS bucket override for commit log restores. */
  @Provides
  @Parameter(BUCKET_OVERRIDE_PARAM)
  static Optional<String> provideBucketOverride(HttpServletRequest req) {
    return extractOptionalParameter(req, BUCKET_OVERRIDE_PARAM);
  }

  /** Provides the start of the time range for a commit log restore. */
  @Provides
  @Parameter(FROM_TIME_PARAM)
  static DateTime provideFromTime(HttpServletRequest req) {
    return extractRequiredDatetimeParameter(req, FROM_TIME_PARAM);
  }

  /** Provides the end of the time range for a commit log restore. */
  @Provides
  @Parameter(TO_TIME_PARAM)
  static DateTime provideToTime(HttpServletRequest req) {
    return extractRequiredDatetimeParameter(req, TO_TIME_PARAM);
  }

  /** Provides the shared fixed-size executor used for parallel backup work. */
  @Provides
  @Backups
  static ListeningExecutorService provideListeningExecutorService() {
    return listeningDecorator(newFixedThreadPool(NUM_THREADS, currentRequestThreadFactory()));
  }

  /** Provides a single-threaded scheduled executor. */
  @Provides
  static ScheduledExecutorService provideScheduledExecutorService() {
    return Executors.newSingleThreadScheduledExecutor();
  }
}

View file

@ -1,83 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
/** Utilities for working with backups. */
@DeleteAfterMigration
public class BackupUtils {

  /** Keys for user metadata fields on commit log files in GCS. */
  public static final class GcsMetadataKeys {

    public static final String NUM_TRANSACTIONS = "num_transactions";
    public static final String LOWER_BOUND_CHECKPOINT = "lower_bound_checkpoint";
    public static final String UPPER_BOUND_CHECKPOINT = "upper_bound_checkpoint";

    private GcsMetadataKeys() {}
  }

  /**
   * Converts the given {@link ImmutableObject} to a raw Datastore entity and write it to an {@link
   * OutputStream} in delimited protocol buffer format.
   */
  static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
    EntityProto proto =
        EntityTranslator.convertToPb(
            auditedOfy().saveIgnoringReadOnlyWithoutBackup().toEntity(entity));
    proto.writeDelimitedTo(stream);
  }

  /**
   * Return an iterator of {@link ImmutableObject} instances deserialized from the given stream.
   *
   * <p>This parses out delimited protocol buffers for raw Datastore entities and then Ofy-loads
   * those as {@link ImmutableObject}.
   *
   * <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
   */
  public static Iterator<ImmutableObject> createDeserializingIterator(
      final InputStream input, boolean withAppIdOverride) {
    return new AbstractIterator<ImmutableObject>() {
      @Override
      protected ImmutableObject computeNext() {
        EntityProto rawProto = new EntityProto();
        // parseDelimitedFrom returns false at end of stream; other errors throw.
        if (!rawProto.parseDelimitedFrom(input)) {
          return endOfData();
        }
        EntityProto proto = withAppIdOverride ? EntityImports.fixEntity(rawProto) : rawProto;
        return auditedOfy().load().fromEntity(EntityTranslator.createFromPb(proto));
      }
    };
  }

  /** Deserializes all entities in the given byte array, without any app-id override. */
  public static ImmutableList<ImmutableObject> deserializeEntities(byte[] bytes) {
    InputStream input = new ByteArrayInputStream(bytes);
    return ImmutableList.copyOf(createDeserializingIterator(input, false));
  }
}

View file

@ -1,127 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static google.registry.backup.ExportCommitLogDiffAction.LOWER_CHECKPOINT_TIME_PARAM;
import static google.registry.backup.ExportCommitLogDiffAction.UPPER_CHECKPOINT_TIME_PARAM;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.flogger.FluentLogger;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogCheckpointRoot;
import google.registry.request.Action;
import google.registry.request.Action.Service;
import google.registry.request.auth.Auth;
import google.registry.util.CloudTasksUtils;
import java.util.Optional;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
 * Action that saves commit log checkpoints to Datastore and kicks off a diff export task.
 *
 * <p>We separate computing and saving the checkpoint from exporting it because the export to GCS is
 * retryable but should not require the computation of a new checkpoint. Saving the checkpoint and
 * enqueuing the export task are done transactionally, so any checkpoint that is saved will be
 * exported to GCS very soon.
 *
 * <p>This action's supported method is GET rather than POST because it gets invoked via cron.
 */
@Action(
    service = Action.Service.BACKEND,
    path = "/_dr/cron/commitLogCheckpoint",
    method = Action.Method.GET,
    automaticallyPrintOk = true,
    auth = Auth.AUTH_INTERNAL_OR_ADMIN)
@DeleteAfterMigration
public final class CommitLogCheckpointAction implements Runnable {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  // Cloud Tasks queue on which the diff-export task is enqueued.
  private static final String QUEUE_NAME = "export-commits";

  /**
   * The amount of time enqueueing should be delayed.
   *
   * <p>The {@link ExportCommitLogDiffAction} is enqueued in {@link CommitLogCheckpointAction},
   * which is inside a Datastore transaction that persists the checkpoint to be exported. After the
   * switch to CloudTasks API, the task may be invoked before the Datastore transaction commits.
   * When this happens, the checkpoint is not found which leads to {@link
   * com.google.common.base.VerifyException}.
   *
   * <p>In order to invoke the task after the transaction commits, a reasonable delay should be
   * added to each task. The latency of the request is mostly in the range of 4-6 seconds; choosing
   * a value 30% greater than the upper bound should solve the issue of invoking a task before the
   * transaction commits.
   */
  static final Duration ENQUEUE_DELAY_SECONDS = Duration.standardSeconds(8);

  @Inject CommitLogCheckpointStrategy strategy;
  @Inject CloudTasksUtils cloudTasksUtils;

  @Inject CommitLogCheckpointAction() {}

  @Override
  public void run() {
    createCheckPointAndStartAsyncExport();
  }

  /**
   * Creates a {@link CommitLogCheckpoint} and initiates an asynchronous export task.
   *
   * @return the {@code CommitLogCheckpoint} to be exported, or empty if a newer checkpoint had
   *     already been written and nothing was persisted
   */
  public Optional<CommitLogCheckpoint> createCheckPointAndStartAsyncExport() {
    final CommitLogCheckpoint checkpoint = strategy.computeCheckpoint();
    logger.atInfo().log(
        "Generated candidate checkpoint for time: %s", checkpoint.getCheckpointTime());
    boolean isCheckPointPersisted =
        ofyTm()
            .transact(
                () -> {
                  // Bail out if a later (or equal-time) checkpoint was already written; saving
                  // this one would move the root's last-written time backwards.
                  DateTime lastWrittenTime =
                      CommitLogCheckpointRoot.loadRoot().getLastWrittenTime();
                  if (isBeforeOrAt(checkpoint.getCheckpointTime(), lastWrittenTime)) {
                    logger.atInfo().log(
                        "Newer checkpoint already written at time: %s", lastWrittenTime);
                    return false;
                  }
                  // Save the checkpoint and advance the root marker in the same transaction.
                  auditedOfy()
                      .saveIgnoringReadOnlyWithoutBackup()
                      .entities(
                          checkpoint,
                          CommitLogCheckpointRoot.create(checkpoint.getCheckpointTime()));
                  // Enqueue a diff task between previous and current checkpoints. Delayed by
                  // ENQUEUE_DELAY_SECONDS so the task runs only after this transaction commits.
                  cloudTasksUtils.enqueue(
                      QUEUE_NAME,
                      cloudTasksUtils.createPostTaskWithDelay(
                          ExportCommitLogDiffAction.PATH,
                          Service.BACKEND.toString(),
                          ImmutableMultimap.of(
                              LOWER_CHECKPOINT_TIME_PARAM,
                              lastWrittenTime.toString(),
                              UPPER_CHECKPOINT_TIME_PARAM,
                              checkpoint.getCheckpointTime().toString()),
                          ENQUEUE_DELAY_SECONDS));
                  return true;
                });
    return isCheckPointPersisted ? Optional.of(checkpoint) : Optional.empty();
  }
}

View file

@ -1,164 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.transformValues;
import static google.registry.model.ofy.CommitLogBucket.getBucketKey;
import static google.registry.util.DateTimeUtils.END_OF_TIME;
import static google.registry.util.DateTimeUtils.earliestOf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.googlecode.objectify.Key;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.Ofy;
import google.registry.util.Clock;
import java.util.List;
import java.util.Map.Entry;
import javax.inject.Inject;
import org.joda.time.DateTime;
/**
 * Implementation of the procedure for determining point-in-time consistent commit log checkpoint.
 *
 * <p>This algorithm examines the recently written commit log data and uses a dual-read approach to
 * determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
 * "consistent" we mean, generally speaking, that if the Datastore were restored by replaying all
 * the commit logs up to the checkpoint times of the buckets, the result would be transactionally
 * correct; there must be no "holes" where restored state depends on non-restored state.
 *
 * <p>The consistency guarantee really has two parts, only one of which is provided by this
 * algorithm. The procedure below guarantees only that if the resulting checkpoint includes any
 * given commit log, it will also include all the commit logs that were both 1) actually written
 * before that commit log "in real life", and 2) have an earlier timestamp than that commit log.
 * (These criteria do not necessarily imply each other, due to the lack of a global shared clock.)
 * The rest of the guarantee comes from our Ofy customizations, which ensure that any transaction
 * that depends on state from a previous transaction does indeed have a later timestamp.
 *
 * <h2>Procedure description</h2>
 *
 * <pre>{@code
 * ComputeCheckpoint() -> returns a set consisting of a timestamp c(b_i) for every bucket b_i
 *
 * 1) read off the latest commit timestamp t(b_i) for every bucket b_i
 * 2) iterate over the buckets b_i a second time, and
 *   a) do a consistent query for the next commit timestamp t'(b_i) where t'(b_i) > t(b_i)
 *   b) if present, add this timestamp t'(b_i) to a set S
 * 3) compute a threshold time T* representing a time before all commits in S, as follows:
 *   a) if S is empty, let T* = +infinity (or the "end of time")
 *   b) else, let T* = T - Δ, for T = min(S) and some small Δ > 0
 * 4) return the set given by: min(t(b_i), T*) for all b_i
 * }</pre>
 *
 * <h2>Correctness proof of algorithm</h2>
 *
 * <p>{@literal As described above, the algorithm is correct as long as it can ensure the following:
 * given a commit log X written at time t(X) to bucket b_x, and another commit log Y that was
 * written "in real life" before X and for which t(Y) < t(X), then if X is included in the
 * checkpoint, so is Y; that is, t(X) <= c(b_x) implies t(Y) <= c(b_y). }
 *
 * <p>{@literal To prove this, first note that we always have c(b_i) <= t(b_i) for every b_i, i.e.
 * every commit log included in the checkpoint must have been seen in the first pass. Hence if X was
 * included, then X must have been written by the time we started the second pass. But since Y was
 * written "in real life" prior to X, we must have seen Y by the second pass too. }
 *
 * <p>{@literal Now assume towards a contradiction that X is indeed included but Y is not, i.e. that
 * we have t(X) <= c(b_x) but t(Y) > c(b_y). If Y was seen in the first pass, i.e. t(Y) <= t(b_y),
 * then by our assumption c(b_y) < t(Y) <= t(b_y), and therefore c(b_y) != t(b_y). By the definition
 * of c(b_y) it must then equal T*, so we have T* < t(Y). However, this is a contradiction since
 * t(Y) < t(X) and t(X) <= c(b_x) <= T*. If instead Y was seen in the second pass but not the first,
 * t'(b_y) exists and we must have t'(b_y) <= t(Y), but then since T* < T <= t'(b_y) by definition,
 * we again reach the contradiction T* < t(Y). }
 */
@DeleteAfterMigration
class CommitLogCheckpointStrategy {

  @Inject Ofy ofy;
  @Inject Clock clock;

  @Inject CommitLogCheckpointStrategy() {}

  /** Compute and return a new CommitLogCheckpoint for the current point in time. */
  public CommitLogCheckpoint computeCheckpoint() {
    DateTime checkpointTime = clock.nowUtc();
    // First pass (step 1): snapshot each bucket's last-written time.
    ImmutableMap<Integer, DateTime> firstPassTimes = readBucketTimestamps();
    // Second pass (steps 2-3): find T*, a time strictly before every commit written after the
    // first-pass snapshot.
    DateTime threshold = readNewCommitLogsAndFindThreshold(firstPassTimes);
    // Step 4: clamp each bucket's first-pass time to at most T*.
    return CommitLogCheckpoint.create(
        checkpointTime,
        computeBucketCheckpointTimes(firstPassTimes, threshold));
  }

  /**
   * Returns a map from all bucket IDs to their current last written time values, fetched without
   * a transaction so with no guarantee of consistency across buckets.
   */
  @VisibleForTesting
  ImmutableMap<Integer, DateTime> readBucketTimestamps() {
    // Use a fresh session cache so that we get the latest data from Datastore.
    return ofy.doWithFreshSessionCache(
        () ->
            CommitLogBucket.loadAllBuckets()
                .stream()
                .collect(
                    ImmutableMap.toImmutableMap(
                        CommitLogBucket::getBucketNum, CommitLogBucket::getLastWrittenTime)));
  }

  /**
   * Returns a threshold value defined as the latest timestamp that is before all new commit logs,
   * where "new" means having a commit time after the per-bucket timestamp in the given map.
   * When no such commit logs exist, the threshold value is set to END_OF_TIME.
   */
  @VisibleForTesting
  DateTime readNewCommitLogsAndFindThreshold(ImmutableMap<Integer, DateTime> bucketTimes) {
    DateTime timeBeforeAllNewCommits = END_OF_TIME;
    for (Entry<Integer, DateTime> entry : bucketTimes.entrySet()) {
      Key<CommitLogBucket> bucketKey = getBucketKey(entry.getKey());
      DateTime bucketTime = entry.getValue();
      // Add 1 to handle START_OF_TIME since 0 isn't a valid id - filter then uses >= instead of >.
      Key<CommitLogManifest> keyForFilter =
          Key.create(CommitLogManifest.create(bucketKey, bucketTime.plusMillis(1), null));
      // Consistent ancestor query for the earliest manifest newer than the first-pass time
      // (manifest keys sort by commit time, so the first key >= keyForFilter is the earliest).
      List<Key<CommitLogManifest>> manifestKeys =
          ofy.load()
              .type(CommitLogManifest.class)
              .ancestor(bucketKey)
              .filterKey(">=", keyForFilter)
              .limit(1)
              .keys()
              .list();
      if (!manifestKeys.isEmpty()) {
        // Keep the threshold strictly before every new commit seen (the "- Δ" in step 3b, with
        // Δ = 1 millisecond).
        timeBeforeAllNewCommits = earliestOf(
            timeBeforeAllNewCommits,
            CommitLogManifest.extractCommitTime(getOnlyElement(manifestKeys)).minusMillis(1));
      }
    }
    return timeBeforeAllNewCommits;
  }

  /**
   * Returns the bucket checkpoint times produced by clamping the given set of bucket timestamps to
   * at most the given threshold value.
   */
  @VisibleForTesting
  ImmutableMap<Integer, DateTime> computeBucketCheckpointTimes(
      ImmutableMap<Integer, DateTime> firstPassTimes,
      final DateTime threshold) {
    return ImmutableMap.copyOf(
        transformValues(firstPassTimes, firstPassTime -> earliestOf(firstPassTime, threshold)));
  }
}

View file

@ -1,346 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
import static java.lang.Boolean.FALSE;
import static java.lang.Boolean.TRUE;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.CommitLogManifestInput;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.EppResource;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.model.translators.CommitLogRevisionsTranslatorFactory;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import javax.inject.Inject;
import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Task that garbage collects old {@link CommitLogManifest} entities.
*
* <p>Once commit logs have been written to GCS, we don't really need them in Datastore anymore,
* except to reconstruct point-in-time snapshots of the database. To make that possible, {@link
* EppResource}s have a {@link EppResource#getRevisions} method that returns the commit logs for
* older points in time. But that functionality is not useful after a certain amount of time, e.g.
* thirty days, so unneeded revisions are deleted (see {@link CommitLogRevisionsTranslatorFactory}).
* This leaves commit logs in the system that are unneeded (have no revisions pointing to them). So
* this task runs periodically to delete the "orphan" commit logs.
*
* <p>This action runs a mapreduce that goes over all existing {@link EppResource} and all {@link
* CommitLogManifest} older than commitLogDatastreRetention, and erases the commit logs aren't in an
* EppResource.
*/
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/deleteOldCommitLogs",
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
// No longer needed in SQL. Subject to future removal.
@Deprecated
@DeleteAfterMigration
public final class DeleteOldCommitLogsAction implements Runnable {
private static final int NUM_MAP_SHARDS = 20;
private static final int NUM_REDUCE_SHARDS = 10;
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject Clock clock;
@Inject
@Config("commitLogDatastoreRetention")
Duration maxAge;
@Inject
@Parameter(PARAM_DRY_RUN)
boolean isDryRun;
@Inject
DeleteOldCommitLogsAction() {}
@Override
public void run() {
  // Manifests with a commit time older than this are eligible for deletion (if unreferenced).
  DateTime deletionThreshold = clock.nowUtc().minus(maxAge);
  logger.atInfo().log(
      "Processing asynchronous deletion of unreferenced CommitLogManifests older than %s.",
      deletionThreshold);
  mrRunner
      .setJobName("Delete old commit logs")
      .setModuleName("backend")
      .setDefaultMapShards(NUM_MAP_SHARDS)
      .setDefaultReduceShards(NUM_REDUCE_SHARDS)
      .runMapreduce(
          new DeleteOldCommitLogsMapper(deletionThreshold),
          new DeleteOldCommitLogsReducer(deletionThreshold, isDryRun),
          // Two inputs feed the same mapper: old manifests (which vote "delete") and all
          // EppResource keys (whose referenced revisions vote "keep").
          ImmutableList.of(
              new CommitLogManifestInput(deletionThreshold),
              EppResourceInputs.createKeyInput(EppResource.class)))
      .sendLinkToMapreduceConsole(response);
}
/**
 * A mapper that iterates over all {@link EppResource} and {@link CommitLogManifest} entities.
 *
 * <p>It emits the target key and {@code false} for all revisions of each EppResources (meaning
 * "don't delete this"), and {@code true} for all CommitLogRevisions (meaning "delete this").
 *
 * <p>The reducer will then delete all CommitLogRevisions that only have {@code true}.
 */
private static class DeleteOldCommitLogsMapper
    extends Mapper<Key<?>, Key<CommitLogManifest>, Boolean> {

  private static final long serialVersionUID = 8008689353479902948L;

  // Cached Datastore kind string, used to distinguish manifest keys from EppResource keys.
  private static final String KIND_MANIFEST = Key.getKind(CommitLogManifest.class);

  // Only manifests with commit times before this threshold are deletion candidates.
  private final DateTime threshold;

  DeleteOldCommitLogsMapper(DateTime threshold) {
    this.threshold = threshold;
  }

  @Override
  public void map(final Key<?> key) {
    // key is either a Key<CommitLogManifest> or a Key<? extends EppResource>.
    //
    // If it's a CommitLogManifest we just emit it as is (no need to load it).
    if (key.getKind().equals(KIND_MANIFEST)) {
      getContext().incrementCounter("old commit log manifests found");
      // safe because we checked getKind
      @SuppressWarnings("unchecked")
      Key<CommitLogManifest> manifestKey = (Key<CommitLogManifest>) key;
      emit(manifestKey, true);
      return;
    }
    // If it isn't a Key<CommitLogManifest> then it should be an EppResource, which we need to
    // load to emit the revisions.
    Object object = auditedOfy().load().key(key).now();
    checkNotNull(object, "Received a key to a missing object. key: %s", key);
    checkState(
        object instanceof EppResource,
        "Received a key to an object that isn't EppResource nor CommitLogManifest."
            + " Key: %s object type: %s",
        key,
        object.getClass().getName());
    getContext().incrementCounter("EPP resources found");
    EppResource eppResource = (EppResource) object;
    if (eppResource.getCreationTime().isAfter(threshold)) {
      getContext().incrementCounter("EPP resources newer than threshold");
    }
    // Emit a "keep" vote (false) for every manifest this resource still references.
    for (Key<CommitLogManifest> manifestKey : eppResource.getRevisions().values()) {
      emit(manifestKey, false);
    }
    getContext()
        .incrementCounter("EPP resource revisions found", eppResource.getRevisions().size());
    checkAndLogRevisionCoverageError(eppResource);
  }

  /**
   * Check if given eppResource has the required revisions.
   *
   * <p>Revisions are used to recreate the state of the resource at a given day in the past
   * "commitLogDatastoreRetention". To do that, we need at least one revision that's older than
   * this duration (is dated before "threshold"), or at least one revision within a day of the
   * resource's creation if it was created after the threshold.
   *
   * <p>Here we check that the given eppResource has the revisions it needs.
   *
   * <p>It's just a sanity check - since we're relying on the revisions to be correct for the
   * deletion to work. We want to alert any problems we find in the revisions.
   *
   * <p>This really checks {@link CommitLogRevisionsTranslatorFactory#transformBeforeSave}.
   * There's nothing we can do at this point to prevent the damage - we only report on it.
   */
  private void checkAndLogRevisionCoverageError(EppResource eppResource) {
    // First - check if there even are revisions
    if (eppResource.getRevisions().isEmpty()) {
      getContext().incrementCounter("EPP resources missing all revisions (SEE LOGS)");
      logger.atSevere().log("EPP resource missing all revisions: %s", Key.create(eppResource));
      return;
    }
    // Next, check if there's a revision that's older than "CommitLogDatastoreRetention". There
    // should have been at least one at the time this resource was saved.
    //
    // Alternatively, if the resource is newer than the threshold - there should be at least one
    // revision within a day of the creation time.
    DateTime oldestRevisionDate = eppResource.getRevisions().firstKey();
    if (oldestRevisionDate.isBefore(threshold)
        || oldestRevisionDate.isBefore(eppResource.getCreationTime().plusDays(1))) {
      // We're OK!
      return;
    }
    // The oldest revision date is newer than the threshold! This shouldn't happen.
    getContext().incrementCounter("EPP resources missing pre-threshold revision (SEE LOGS)");
    logger.atSevere().log(
        "EPP resource missing old enough revision: "
            + "%s (created on %s) has %d revisions between %s and %s, while threshold is %s.",
        Key.create(eppResource),
        eppResource.getCreationTime(),
        eppResource.getRevisions().size(),
        eppResource.getRevisions().firstKey(),
        eppResource.getRevisions().lastKey(),
        threshold);
    // We want to see how bad it is though: if the difference is less than a day then this might
    // still be OK (we only need logs for the end of the day). But if it's more than a day, then
    // we are 100% sure we can't recreate all the history we need from the revisions.
    Duration interval = new Duration(threshold, oldestRevisionDate);
    if (interval.isLongerThan(Duration.standardDays(1))) {
      getContext()
          .incrementCounter("EPP resources missing pre-(threshold+1d) revision (SEE LOGS)");
    }
  }
}
/**
 * Reducer that deletes unreferenced {@link CommitLogManifest} + child {@link CommitLogMutation}.
 *
 * <p>It receives the manifestKey to possibly delete, and a list of boolean 'verdicts' from
 * various sources (the "old manifests" source and the "still referenced" source) on whether it's
 * OK to delete this manifestKey. If even one source returns "false" (meaning "it's not OK to
 * delete this manifest") then it won't be deleted.
 */
static class DeleteOldCommitLogsReducer extends Reducer<Key<CommitLogManifest>, Boolean, Void> {

  private static final long serialVersionUID = -4918760187627937268L;

  // Manifests whose commit time is strictly after this threshold must never be deleted.
  private final DateTime deletionThreshold;
  // When true, run all checks and counters but skip the actual deletes.
  private final boolean isDryRun;

  /** Outcome of one deletion attempt, plus the number of entities actually deleted. */
  @AutoValue
  abstract static class DeletionResult {
    enum Status {
      ALREADY_DELETED,
      AFTER_THRESHOLD,
      SUCCESS
    }

    public abstract Status status();

    public abstract int numDeleted();

    static DeletionResult create(Status status, int numDeleted) {
      return new AutoValue_DeleteOldCommitLogsAction_DeleteOldCommitLogsReducer_DeletionResult(
          status, numDeleted);
    }
  }

  DeleteOldCommitLogsReducer(DateTime deletionThreshold, boolean isDryRun) {
    this.deletionThreshold = deletionThreshold;
    this.isDryRun = isDryRun;
  }

  /**
   * Deletes the manifest (and its child mutations) iff every verdict is TRUE; otherwise only
   * increments diagnostic counters and leaves the manifest untouched.
   */
  @Override
  public void reduce(
      final Key<CommitLogManifest> manifestKey, ReducerInput<Boolean> canDeleteVerdicts) {
    // Materialize the verdicts into a multiset so we can detect duplicate votes per source.
    ImmutableMultiset<Boolean> canDeleteMultiset = ImmutableMultiset.copyOf(canDeleteVerdicts);
    if (canDeleteMultiset.count(TRUE) > 1) {
      getContext().incrementCounter("commit log manifests incorrectly mapped multiple times");
    }
    if (canDeleteMultiset.count(FALSE) > 1) {
      getContext().incrementCounter("commit log manifests referenced multiple times");
    }
    // A single FALSE verdict vetoes deletion of this manifest.
    if (canDeleteMultiset.contains(FALSE)) {
      getContext()
          .incrementCounter(
              canDeleteMultiset.contains(TRUE)
                  ? "old commit log manifests still referenced"
                  : "new (or nonexistent) commit log manifests referenced");
      getContext()
          .incrementCounter("EPP resource revisions handled", canDeleteMultiset.count(FALSE));
      return;
    }
    DeletionResult deletionResult =
        ofyTm()
            .transactNew(
                () -> {
                  CommitLogManifest manifest = auditedOfy().load().key(manifestKey).now();
                  // It is possible that the same manifestKey was run twice, if a shard had to be
                  // restarted or some weird failure. If this happens, we want to exit
                  // immediately. Note that this can never happen in dryRun.
                  if (manifest == null) {
                    return DeletionResult.create(DeletionResult.Status.ALREADY_DELETED, 0);
                  }
                  // Doing a sanity check on the date. This is the only place we use the
                  // CommitLogManifest, so maybe removing this test will improve performance.
                  // However, unless it's proven that the performance boost is significant (and
                  // we've tested this enough to be sure it never happens)- the safety of "let's
                  // not delete stuff we need from prod" is more important.
                  if (manifest.getCommitTime().isAfter(deletionThreshold)) {
                    return DeletionResult.create(DeletionResult.Status.AFTER_THRESHOLD, 0);
                  }
                  // Delete the manifest together with all of its child mutations in one shot.
                  Iterable<Key<CommitLogMutation>> commitLogMutationKeys =
                      auditedOfy()
                          .load()
                          .type(CommitLogMutation.class)
                          .ancestor(manifestKey)
                          .keys()
                          .iterable();
                  ImmutableList<Key<?>> keysToDelete =
                      ImmutableList.<Key<?>>builder()
                          .addAll(commitLogMutationKeys)
                          .add(manifestKey)
                          .build();
                  // Normally in a dry run we would log the entities that would be deleted, but
                  // those can number in the millions so we skip the logging.
                  if (!isDryRun) {
                    auditedOfy().deleteWithoutBackup().keys(keysToDelete);
                  }
                  return DeletionResult.create(
                      DeletionResult.Status.SUCCESS, keysToDelete.size());
                });
    // Record the transaction's outcome via counters for post-run inspection.
    switch (deletionResult.status()) {
      case SUCCESS:
        getContext().incrementCounter("old commit log manifests deleted");
        getContext().incrementCounter("total entities deleted", deletionResult.numDeleted());
        break;
      case ALREADY_DELETED:
        getContext().incrementCounter("attempts to delete an already deleted manifest");
        break;
      case AFTER_THRESHOLD:
        logger.atSevere().log(
            "Won't delete CommitLogManifest %s that is too recent.", manifestKey);
        getContext().incrementCounter("manifests incorrectly assigned for deletion (SEE LOGS)");
        break;
    }
  }
}
}

View file

@ -1,114 +0,0 @@
// Copyright 2021 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import com.google.apphosting.api.ApiProxy;
import com.google.storage.onestore.v3.OnestoreEntity;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import com.google.storage.onestore.v3.OnestoreEntity.Path;
import com.google.storage.onestore.v3.OnestoreEntity.Property.Meaning;
import com.google.storage.onestore.v3.OnestoreEntity.PropertyValue.ReferenceValue;
import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Objects;
/** Utilities for handling imported Datastore entities. */
@DeleteAfterMigration
public class EntityImports {

  /**
   * Transitively sets the {@code appId} of all keys in a foreign entity to that of the current
   * system.
   *
   * @param entityProto the (possibly foreign) entity to fix; mutated in place
   * @return the same {@link EntityProto} instance, with all embedded app ids rewritten
   */
  public static EntityProto fixEntity(EntityProto entityProto) {
    // Fix: local variable was previously misspelled as "currentAappId".
    String currentAppId = ApiProxy.getCurrentEnvironment().getAppId();
    if (Objects.equals(currentAppId, entityProto.getKey().getApp())) {
      // Entity already belongs to this app; nothing to rewrite.
      return entityProto;
    }
    return fixEntity(entityProto, currentAppId);
  }

  /** Recursively rewrites the app id on the entity's own key and on every key-valued property. */
  private static EntityProto fixEntity(EntityProto entityProto, String appId) {
    if (entityProto.hasKey()) {
      fixKey(entityProto, appId);
    }
    for (OnestoreEntity.Property property : entityProto.mutablePropertys()) {
      fixProperty(property, appId);
    }
    for (OnestoreEntity.Property property : entityProto.mutableRawPropertys()) {
      fixProperty(property, appId);
    }
    // CommitLogMutation embeds an entity as bytes, which needs additional fixes.
    if (isCommitLogMutation(entityProto)) {
      fixMutationEntityProtoBytes(entityProto, appId);
    }
    return entityProto;
  }

  /** Returns true if the entity's key path terminates in an element of kind CommitLogMutation. */
  private static boolean isCommitLogMutation(EntityProto entityProto) {
    if (!entityProto.hasKey()) {
      return false;
    }
    Path path = entityProto.getKey().getPath();
    if (path.elementSize() == 0) {
      return false;
    }
    return Objects.equals(path.getElement(path.elementSize() - 1).getType(), "CommitLogMutation");
  }

  /**
   * Rewrites the serialized entity held in a CommitLogMutation's {@code entityProtoBytes} raw
   * property, re-serializing it after the recursive fix.
   */
  private static void fixMutationEntityProtoBytes(EntityProto entityProto, String appId) {
    for (OnestoreEntity.Property property : entityProto.mutableRawPropertys()) {
      if (Objects.equals(property.getName(), "entityProtoBytes")) {
        OnestoreEntity.PropertyValue value = property.getValue();
        EntityProto fixedProto =
            fixEntity(bytesToEntityProto(value.getStringValueAsBytes()), appId);
        value.setStringValueAsBytes(fixedProto.toByteArray());
        return;
      }
    }
  }

  /** Sets the app id on the entity's own key. */
  private static void fixKey(EntityProto entityProto, String appId) {
    entityProto.getMutableKey().setApp(appId);
  }

  /** Sets the app id on a reference value (a key stored as a property). */
  private static void fixKey(ReferenceValue referenceValue, String appId) {
    referenceValue.setApp(appId);
  }

  /** Rewrites a single property: either a direct reference value or an embedded entity proto. */
  private static void fixProperty(OnestoreEntity.Property property, String appId) {
    OnestoreEntity.PropertyValue value = property.getMutableValue();
    if (value.hasReferenceValue()) {
      fixKey(value.getMutableReferenceValue(), appId);
      return;
    }
    if (property.getMeaningEnum().equals(Meaning.ENTITY_PROTO)) {
      EntityProto embeddedProto = bytesToEntityProto(value.getStringValueAsBytes());
      fixEntity(embeddedProto, appId);
      value.setStringValueAsBytes(embeddedProto.toByteArray());
    }
  }

  /**
   * Parses raw bytes into an {@link EntityProto}.
   *
   * @throws IllegalStateException if the bytes cannot be parsed
   */
  private static EntityProto bytesToEntityProto(byte[] bytes) {
    EntityProto entityProto = new EntityProto();
    boolean isParsed = entityProto.parseFrom(bytes);
    if (!isParsed) {
      throw new IllegalStateException("Failed to parse raw bytes as EntityProto.");
    }
    return entityProto;
  }
}

View file

@ -1,223 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Lists.partition;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.NUM_TRANSACTIONS;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.UPPER_BOUND_CHECKPOINT;
import static google.registry.backup.BackupUtils.serializeEntity;
import static google.registry.model.ofy.CommitLogBucket.getBucketKey;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.isAtOrAfter;
import static java.util.Comparator.comparingLong;
import com.google.cloud.storage.BlobId;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.gcs.GcsUtils;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.joda.time.DateTime;
/** Action that exports the diff between two commit log checkpoints to GCS. */
@Action(
    service = Action.Service.BACKEND,
    path = ExportCommitLogDiffAction.PATH,
    method = Action.Method.POST,
    automaticallyPrintOk = true,
    auth = Auth.AUTH_INTERNAL_OR_ADMIN)
@DeleteAfterMigration
public final class ExportCommitLogDiffAction implements Runnable {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  static final String PATH = "/_dr/task/exportCommitLogDiff";
  static final String UPPER_CHECKPOINT_TIME_PARAM = "upperCheckpointTime";
  static final String LOWER_CHECKPOINT_TIME_PARAM = "lowerCheckpointTime";

  // Filename prefix for each diff file on GCS; the upper checkpoint time is appended to it.
  public static final String DIFF_FILE_PREFIX = "commit_diff_until_";

  @Inject GcsUtils gcsUtils;
  @Inject @Config("commitLogGcsBucket") String gcsBucket;
  // Number of manifests loaded per chunk while streaming the export.
  @Inject @Config("commitLogDiffExportBatchSize") int batchSize;
  @Inject @Parameter(LOWER_CHECKPOINT_TIME_PARAM) DateTime lowerCheckpointTime;
  @Inject @Parameter(UPPER_CHECKPOINT_TIME_PARAM) DateTime upperCheckpointTime;
  @Inject ExportCommitLogDiffAction() {}

  /**
   * Writes one GCS file containing the upper checkpoint followed by every manifest (and its
   * mutations) committed in the window (lowerCheckpointTime, upperCheckpointTime].
   */
  @Override
  public void run() {
    logger.atInfo().log(
        "Exporting commit log diffs between %s and %s.", lowerCheckpointTime, upperCheckpointTime);
    checkArgument(isAtOrAfter(lowerCheckpointTime, START_OF_TIME));
    checkArgument(lowerCheckpointTime.isBefore(upperCheckpointTime));
    // Load the boundary checkpoints - lower is exclusive and may not exist (on the first export,
    // when lowerCheckpointTime is START_OF_TIME), whereas the upper is inclusive and must exist.
    CommitLogCheckpoint lowerCheckpoint =
        lowerCheckpointTime.isAfter(START_OF_TIME)
            ? verifyNotNull(
                auditedOfy().load().key(CommitLogCheckpoint.createKey(lowerCheckpointTime)).now())
            : null;
    CommitLogCheckpoint upperCheckpoint =
        verifyNotNull(
            auditedOfy().load().key(CommitLogCheckpoint.createKey(upperCheckpointTime)).now());
    // Load the keys of all the manifests to include in this diff.
    List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
    logger.atInfo().log("Found %d manifests to export.", sortedKeys.size());
    // Open an output channel to GCS, wrapped in a stream for convenience.
    try (OutputStream gcsStream =
        gcsUtils.openOutputStream(
            BlobId.of(gcsBucket, DIFF_FILE_PREFIX + upperCheckpointTime),
            ImmutableMap.of(
                LOWER_BOUND_CHECKPOINT, lowerCheckpointTime.toString(),
                UPPER_BOUND_CHECKPOINT, upperCheckpointTime.toString(),
                NUM_TRANSACTIONS, Integer.toString(sortedKeys.size())))) {
      // Export the upper checkpoint itself.
      serializeEntity(upperCheckpoint, gcsStream);
      // If there are no manifests to export, stop early, now that we've written out the file with
      // the checkpoint itself (which is needed for restores, even if it's empty).
      if (sortedKeys.isEmpty()) {
        return;
      }
      // Export to GCS in chunks, one per fixed batch of commit logs. While processing one batch,
      // asynchronously load the entities for the next one.
      List<List<Key<CommitLogManifest>>> keyChunks = partition(sortedKeys, batchSize);
      // Objectify's map return type is asynchronous. Calling .values() will block until it loads.
      Map<?, CommitLogManifest> nextChunkToExport = auditedOfy().load().keys(keyChunks.get(0));
      for (int i = 0; i < keyChunks.size(); i++) {
        // Force the async load to finish.
        Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
        logger.atInfo().log("Loaded %d manifests.", chunkValues.size());
        // Since there is no hard bound on how much data this might be, take care not to let the
        // Objectify session cache fill up and potentially run out of memory. This is the only safe
        // point to do this since at this point there is no async load in progress.
        auditedOfy().clearSessionCache();
        // Kick off the next async load, which can happen in parallel to the current GCS export.
        if (i + 1 < keyChunks.size()) {
          nextChunkToExport = auditedOfy().load().keys(keyChunks.get(i + 1));
        }
        exportChunk(gcsStream, chunkValues);
        logger.atInfo().log("Exported %d manifests.", chunkValues.size());
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    logger.atInfo().log("Exported %d total manifests.", sortedKeys.size());
  }

  /**
   * Loads all the diff keys, sorted in a transaction-consistent chronological order.
   *
   * @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
   * @param upperCheckpoint inclusive upper bound on keys in this diff
   */
  private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
      @Nullable final CommitLogCheckpoint lowerCheckpoint,
      final CommitLogCheckpoint upperCheckpoint) {
    // Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
    // transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
    // CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
    // a deterministic order.
    return upperCheckpoint
        .getBucketTimestamps()
        .keySet()
        .stream()
        .flatMap(
            bucketNum ->
                Streams.stream(loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum)))
        .sorted(
            comparingLong(Key<CommitLogManifest>::getId)
                .thenComparingLong(a -> a.getParent().getId()))
        .collect(toImmutableList());
  }

  /**
   * Loads the diff keys for one bucket.
   *
   * @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
   * @param upperCheckpoint inclusive upper bound on keys in this diff
   * @param bucketNum the bucket to load diff keys from
   */
  private Iterable<Key<CommitLogManifest>> loadDiffKeysFromBucket(
      @Nullable CommitLogCheckpoint lowerCheckpoint,
      CommitLogCheckpoint upperCheckpoint,
      int bucketNum) {
    // If no lower checkpoint exists, or if it exists but had no timestamp for this bucket number
    // (because the bucket count was increased between these checkpoints), then use START_OF_TIME
    // as the effective exclusive lower bound.
    DateTime lowerCheckpointBucketTime =
        firstNonNull(
            (lowerCheckpoint == null) ? null : lowerCheckpoint.getBucketTimestamps().get(bucketNum),
            START_OF_TIME);
    // Since START_OF_TIME=0 is not a valid id in a key, add 1 to both bounds. Then instead of
    // loading lowerBound < x <= upperBound, we can load lowerBound <= x < upperBound.
    DateTime lowerBound = lowerCheckpointBucketTime.plusMillis(1);
    DateTime upperBound = upperCheckpoint.getBucketTimestamps().get(bucketNum).plusMillis(1);
    // If the lower and upper bounds are equal, there can't be any results, so skip the query.
    if (lowerBound.equals(upperBound)) {
      return ImmutableSet.of();
    }
    Key<CommitLogBucket> bucketKey = getBucketKey(bucketNum);
    // Ancestor key-range query: cheap (keys only) and strongly consistent within the bucket.
    return auditedOfy()
        .load()
        .type(CommitLogManifest.class)
        .ancestor(bucketKey)
        .filterKey(">=", CommitLogManifest.createKey(bucketKey, lowerBound))
        .filterKey("<", CommitLogManifest.createKey(bucketKey, upperBound))
        .keys();
  }

  /** Writes a chunks-worth of manifests and associated mutations to GCS. */
  private void exportChunk(OutputStream gcsStream, Collection<CommitLogManifest> chunk)
      throws IOException {
    // Kickoff async loads for all the manifests in the chunk.
    ImmutableList.Builder<Iterable<? extends ImmutableObject>> entities =
        new ImmutableList.Builder<>();
    for (CommitLogManifest manifest : chunk) {
      // Each manifest is written immediately before its own mutations.
      entities.add(ImmutableList.of(manifest));
      entities.add(auditedOfy().load().type(CommitLogMutation.class).ancestor(manifest));
    }
    for (ImmutableObject entity : concat(entities.build())) {
      serializeEntity(entity, gcsStream);
    }
  }
}

View file

@ -1,246 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static google.registry.backup.BackupUtils.GcsMetadataKeys.LOWER_BOUND_CHECKPOINT;
import static google.registry.backup.ExportCommitLogDiffAction.DIFF_FILE_PREFIX;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import static google.registry.util.DateTimeUtils.latestOf;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.UncheckedExecutionException;
import google.registry.backup.BackupModule.Backups;
import google.registry.gcs.GcsUtils;
import google.registry.model.annotations.DeleteAfterMigration;
import java.io.IOException;
import java.time.Duration;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ScheduledExecutorService;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Provider;
import org.joda.time.DateTime;
/** Utility class to list commit logs diff files stored on GCS. */
@DeleteAfterMigration
class GcsDiffFileLister {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  /** Timeout for retrieving per-file information from GCS. */
  private static final Duration FILE_INFO_TIMEOUT_DURATION = Duration.ofMinutes(1);

  @Inject GcsUtils gcsUtils;
  // Executor used to fetch per-file GCS metadata in parallel.
  @Inject @Backups Provider<ListeningExecutorService> executorProvider;
  // Used only to enforce FILE_INFO_TIMEOUT_DURATION on the metadata futures.
  @Inject ScheduledExecutorService scheduledExecutorService;

  @Inject
  GcsDiffFileLister() {}

  /**
   * Traverses the sequence of diff files backwards from checkpointTime and inserts the file
   * metadata into "sequence". Returns true if a complete sequence was discovered, false if one or
   * more files are missing.
   *
   * @param gcsBucket bucket holding the diff files
   * @param upperBoundTimesToBlobInfo pre-fetched metadata futures keyed by upper checkpoint time
   * @param fromTime lower bound of the walk; traversal stops once it steps before this time
   * @param lastTime upper checkpoint time of the file to start walking backwards from
   * @param sequence output map populated with each discovered file, keyed by its checkpoint time
   * @throws UncheckedExecutionException wrapping a {@link java.util.concurrent.TimeoutException} if
   *     the GCS call fails to finish within one minute, or wrapping any other exception if
   *     something else goes wrong.
   */
  private boolean constructDiffSequence(
      String gcsBucket,
      Map<DateTime, ListenableFuture<BlobInfo>> upperBoundTimesToBlobInfo,
      DateTime fromTime,
      DateTime lastTime,
      TreeMap<DateTime, BlobInfo> sequence) {
    DateTime checkpointTime = lastTime;
    while (isBeforeOrAt(fromTime, checkpointTime)) {
      BlobInfo blobInfo;
      if (upperBoundTimesToBlobInfo.containsKey(checkpointTime)) {
        blobInfo =
            Futures.getUnchecked(
                Futures.withTimeout(
                    upperBoundTimesToBlobInfo.get(checkpointTime),
                    FILE_INFO_TIMEOUT_DURATION,
                    scheduledExecutorService));
      } else {
        // The listing didn't include this file, so look it up directly by name.
        String filename = DIFF_FILE_PREFIX + checkpointTime;
        logger.atInfo().log("Patching GCS list; discovered file: %s", filename);
        blobInfo = getBlobInfo(gcsBucket, filename);
        // If we hit a gap, quit.
        if (blobInfo == null) {
          logger.atWarning().log(
              "Gap discovered in sequence terminating at %s, missing file: %s",
              sequence.lastKey(), filename);
          logger.atInfo().log("Found sequence from %s to %s.", checkpointTime, lastTime);
          return false;
        }
      }
      sequence.put(checkpointTime, blobInfo);
      // Each file's metadata names its predecessor's checkpoint time; step back to it.
      checkpointTime = getLowerBoundTime(blobInfo);
    }
    logger.atInfo().log("Found sequence from %s to %s.", checkpointTime, lastTime);
    return true;
  }

  /**
   * Returns the metadata of all diff files whose upper checkpoint time falls in [fromTime,
   * toTime], verified to form a single unbroken chain and ordered chronologically.
   *
   * @param gcsBucket bucket to list diff files from
   * @param fromTime inclusive lower bound on upper checkpoint times
   * @param toTime inclusive upper bound, or null for no upper bound
   * @throws IllegalStateException (via checkState) if the file set has gaps or forks
   */
  ImmutableList<BlobInfo> listDiffFiles(
      String gcsBucket, DateTime fromTime, @Nullable DateTime toTime) {
    logger.atInfo().log("Requested restore from time: %s", fromTime);
    if (toTime != null) {
      logger.atInfo().log("  Until time: %s", toTime);
    }
    // List all of the diff files on GCS and build a map from each file's upper checkpoint time
    // (extracted from the filename) to its asynchronously-loaded metadata, keeping only files with
    // an upper checkpoint time > fromTime.
    TreeMap<DateTime, ListenableFuture<BlobInfo>> upperBoundTimesToBlobInfo = new TreeMap<>();
    String commitLogDiffPrefix = getCommitLogDiffPrefix(fromTime, toTime);
    ImmutableList<String> filenames;
    try {
      filenames =
          gcsUtils.listFolderObjects(gcsBucket, commitLogDiffPrefix).stream()
              .map(s -> commitLogDiffPrefix + s)
              .collect(toImmutableList());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    DateTime lastUpperBoundTime = START_OF_TIME;
    TreeMap<DateTime, BlobInfo> sequence = new TreeMap<>();
    ListeningExecutorService executor = executorProvider.get();
    try {
      for (String filename : filenames) {
        // The filename encodes the file's upper checkpoint time after the fixed prefix.
        String strippedFilename = filename.replaceFirst(DIFF_FILE_PREFIX, "");
        DateTime upperBoundTime = DateTime.parse(strippedFilename);
        if (isInRange(upperBoundTime, fromTime, toTime)) {
          upperBoundTimesToBlobInfo.put(
              upperBoundTime, executor.submit(() -> getBlobInfo(gcsBucket, filename)));
          lastUpperBoundTime = latestOf(upperBoundTime, lastUpperBoundTime);
        }
      }
      if (upperBoundTimesToBlobInfo.isEmpty()) {
        logger.atInfo().log("No files found.");
        return ImmutableList.of();
      }
      // Reconstruct the sequence of files by traversing backwards from "lastUpperBoundTime" (i.e.
      // the last file that we found) and finding its previous file until we either run out of files
      // or get to one that precedes "fromTime".
      //
      // GCS file listing is eventually consistent, so it's possible that we are missing a file. The
      // metadata of a file is sufficient to identify the preceding file, so if we start from the
      // last file and work backwards we can verify that we have no holes in our chain (although we
      // may be missing files at the end).
      logger.atInfo().log("Restoring until: %s", lastUpperBoundTime);
      boolean inconsistentFileSet =
          !constructDiffSequence(
              gcsBucket, upperBoundTimesToBlobInfo, fromTime, lastUpperBoundTime, sequence);
      // Verify that all of the elements in the original set are represented in the sequence. If we
      // find anything that's not represented, construct a sequence for it.
      boolean checkForMoreExtraDiffs = true; // Always loop at least once.
      while (checkForMoreExtraDiffs) {
        checkForMoreExtraDiffs = false;
        for (DateTime key : upperBoundTimesToBlobInfo.descendingKeySet()) {
          if (!isInRange(key, fromTime, toTime)) {
            break;
          }
          if (!sequence.containsKey(key)) {
            // Recalculate the sequence for purely informational purposes.
            logger.atWarning().log(
                "Fork found in commit log history. The following sequence "
                    + "is disconnected from the sequence of the final commit:");
            constructDiffSequence(gcsBucket, upperBoundTimesToBlobInfo, fromTime, key, sequence);
            checkForMoreExtraDiffs = true;
            inconsistentFileSet = true;
            break;
          }
        }
      }
      checkState(
          !inconsistentFileSet,
          "Unable to compute commit diff history, there are either gaps or forks in the history "
              + "file set. Check log for details.");
    } finally {
      executor.shutdown();
    }
    logger.atInfo().log(
        "Actual restore from time: %s", getLowerBoundTime(sequence.firstEntry().getValue()));
    logger.atInfo().log("Found %d files to restore.", sequence.size());
    return ImmutableList.copyOf(sequence.values());
  }

  /**
   * Returns true if 'time' is in range of 'start' and 'end'.
   *
   * <p>If 'end' is null, returns true if 'time' is after 'start'.
   */
  private boolean isInRange(DateTime time, DateTime start, @Nullable DateTime end) {
    return isBeforeOrAt(start, time) && (end == null || isBeforeOrAt(time, end));
  }

  /** Reads the predecessor file's checkpoint time out of a diff file's GCS metadata. */
  private DateTime getLowerBoundTime(BlobInfo blobInfo) {
    return DateTime.parse(blobInfo.getMetadata().get(LOWER_BOUND_CHECKPOINT));
  }

  /** Fetches a single file's metadata from GCS; returns null if the file does not exist. */
  private BlobInfo getBlobInfo(String gcsBucket, String filename) {
    return gcsUtils.getBlobInfo(BlobId.of(gcsBucket, filename));
  }

  /**
   * Returns a prefix guaranteed to cover all commit log diff files in the given range.
   *
   * <p>The listObjects call can be fairly slow if we search over many thousands or tens of
   * thousands of files, so we restrict the search space. The commit logs have a file format of
   * "commit_diff_until_2021-05-11T06:48:00.070Z" so we can often filter down as far as the hour.
   *
   * <p>Here, we get the longest prefix possible based on which fields (year, month, day, hour) the
   * times in question have in common.
   */
  @VisibleForTesting
  static String getCommitLogDiffPrefix(DateTime from, @Nullable DateTime to) {
    StringBuilder result = new StringBuilder(DIFF_FILE_PREFIX);
    if (to == null || from.getYear() != to.getYear()) {
      return result.toString();
    }
    result.append(from.getYear()).append('-');
    if (from.getMonthOfYear() != to.getMonthOfYear()) {
      return result.toString();
    }
    result.append(String.format("%02d-", from.getMonthOfYear()));
    if (from.getDayOfMonth() != to.getDayOfMonth()) {
      return result.toString();
    }
    result.append(String.format("%02dT", from.getDayOfMonth()));
    if (from.getHourOfDay() != to.getHourOfDay()) {
      return result.toString();
    }
    result.append(String.format("%02d:", from.getHourOfDay()));
    return result.toString();
  }
}

View file

@ -1,208 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterators.peekingIterator;
import static google.registry.backup.BackupUtils.createDeserializingIterator;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.cloud.storage.BlobInfo;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.Result;
import com.googlecode.objectify.util.ResultNow;
import google.registry.config.RegistryConfig.Config;
import google.registry.config.RegistryEnvironment;
import google.registry.gcs.GcsUtils;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogCheckpointRoot;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.Retrier;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;
import javax.inject.Inject;
import org.joda.time.DateTime;
/** Restore Registry 2 commit logs from GCS to Datastore. */
@Action(
service = Action.Service.TOOLS,
path = RestoreCommitLogsAction.PATH,
method = Action.Method.POST,
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
@DeleteAfterMigration
public class RestoreCommitLogsAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
public static final String PATH = "/_dr/task/restoreCommitLogs";
static final String DRY_RUN_PARAM = "dryRun";
static final String FROM_TIME_PARAM = "fromTime";
static final String TO_TIME_PARAM = "toTime";
static final String BUCKET_OVERRIDE_PARAM = "gcsBucket";
private static final ImmutableSet<RegistryEnvironment> FORBIDDEN_ENVIRONMENTS =
ImmutableSet.of(RegistryEnvironment.PRODUCTION, RegistryEnvironment.SANDBOX);
@Inject GcsUtils gcsUtils;
@Inject @Parameter(DRY_RUN_PARAM) boolean dryRun;
@Inject @Parameter(FROM_TIME_PARAM) DateTime fromTime;
@Inject @Parameter(TO_TIME_PARAM) DateTime toTime;
@Inject
@Parameter(BUCKET_OVERRIDE_PARAM)
Optional<String> gcsBucketOverride;
@Inject DatastoreService datastoreService;
@Inject GcsDiffFileLister diffLister;
@Inject
@Config("commitLogGcsBucket")
String defaultGcsBucket;
@Inject Retrier retrier;
@Inject RestoreCommitLogsAction() {}
@Override
public void run() {
checkArgument(
!FORBIDDEN_ENVIRONMENTS.contains(RegistryEnvironment.get()),
"DO NOT RUN IN PRODUCTION OR SANDBOX.");
if (dryRun) {
logger.atInfo().log("Running in dry-run mode.");
}
String gcsBucket = gcsBucketOverride.orElse(defaultGcsBucket);
logger.atInfo().log("Restoring from %s.", gcsBucket);
List<BlobInfo> diffFiles = diffLister.listDiffFiles(gcsBucket, fromTime, toTime);
if (diffFiles.isEmpty()) {
logger.atInfo().log("Nothing to restore.");
return;
}
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
CommitLogCheckpoint lastCheckpoint = null;
for (BlobInfo metadata : diffFiles) {
logger.atInfo().log("Restoring: %s", metadata.getName());
try (InputStream input = gcsUtils.openInputStream(metadata.getBlobId())) {
PeekingIterator<ImmutableObject> commitLogs =
peekingIterator(createDeserializingIterator(input, true));
lastCheckpoint = (CommitLogCheckpoint) commitLogs.next();
saveOfy(ImmutableList.of(lastCheckpoint)); // Save the checkpoint itself.
while (commitLogs.hasNext()) {
CommitLogManifest manifest = restoreOneTransaction(commitLogs);
bucketTimestamps.put(manifest.getBucketId(), manifest.getCommitTime());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
// Restore the CommitLogCheckpointRoot and CommitLogBuckets.
saveOfy(
Streams.concat(
bucketTimestamps
.entrySet()
.stream()
.map(
entry ->
new CommitLogBucket.Builder()
.setBucketNum(entry.getKey())
.setLastWrittenTime(entry.getValue())
.build()),
Stream.of(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())))
.collect(toImmutableList()));
logger.atInfo().log("Restore complete.");
}
/**
* Restore the contents of one transaction to Datastore.
*
* <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
* object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
* restore by deleting the deletes and recreating the saves from their proto form. We also save
* the commit logs themselves back to Datastore, so that the commit log system itself is
* transparently restored alongside the data.
*
* @return the manifest, for use in restoring the {@link CommitLogBucket}.
*/
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
  // The manifest always leads its transaction in the serialized stream; the mutations follow.
  final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
  // Kick off the deletes asynchronously so they overlap with building the save batch below.
  Result<?> deleteResult = deleteAsync(manifest.getDeletions());
  // Save the manifest itself too, so the commit-log system is restored alongside the data.
  List<Entity> entitiesToSave = Lists.newArrayList(auditedOfy().save().toEntity(manifest));
  // Consume consecutive mutations belonging to this manifest; peek() stops us at the next
  // manifest (or end of stream) without consuming it.
  while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
    CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
    // Save both the mutation record and the entity it describes (recreated from proto bytes).
    entitiesToSave.add(auditedOfy().save().toEntity(mutation));
    entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
  }
  saveRaw(entitiesToSave);
  try {
    // Join the async delete started above.
    deleteResult.now();
  } catch (Exception e) {
    // If the first delete attempt failed, re-issue it synchronously under the retrier.
    retrier.callWithRetry(
        () -> deleteAsync(manifest.getDeletions()).now(), RuntimeException.class);
  }
  return manifest;
}
/**
 * Persists raw Datastore entities, retrying on {@link RuntimeException}.
 *
 * <p>In dry-run mode, logs the entities that would have been saved and performs no write.
 */
private void saveRaw(List<Entity> entitiesToSave) {
  if (!dryRun) {
    retrier.callWithRetry(() -> datastoreService.put(entitiesToSave), RuntimeException.class);
  } else {
    logger.atInfo().log("Would have saved entities: %s", entitiesToSave);
  }
}
/**
 * Saves the given Objectify entities without writing commit-log backups, retrying on
 * {@link RuntimeException}.
 *
 * <p>In dry-run mode, logs the entities that would have been saved and performs no write.
 */
private void saveOfy(Iterable<? extends ImmutableObject> objectsToSave) {
  if (!dryRun) {
    retrier.callWithRetry(
        () -> auditedOfy().saveWithoutBackup().entities(objectsToSave).now(),
        RuntimeException.class);
  } else {
    logger.atInfo().log("Would have saved entities: %s", objectsToSave);
  }
}
/**
 * Starts an asynchronous, backup-free delete of the given keys.
 *
 * <p>Returns an already-completed {@link ResultNow} when there is nothing to do: either the set
 * is empty, or we are in dry-run mode (in which case the keys are only logged).
 */
private Result<?> deleteAsync(Set<Key<?>> keysToDelete) {
  if (dryRun) {
    logger.atInfo().log("Would have deleted entities: %s", keysToDelete);
    return new ResultNow<Void>(null);
  }
  if (keysToDelete.isEmpty()) {
    return new ResultNow<Void>(null);
  }
  return auditedOfy().deleteWithoutBackup().keys(keysToDelete);
}
}

View file

@ -1,200 +0,0 @@
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.backup;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.api.datastore.Key;
import com.google.auto.value.AutoValue;
import com.google.auto.value.extension.memoized.Memoized;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Optional;
import java.util.stream.Stream;
import javax.annotation.Nullable;
/**
* A Datastore {@link Entity Entity's} serialized state with timestamp. The intended use case is a
* multi-stage pipeline where an Entity's Java form is not needed in most stages.
*
* <p>For a new or updated Entity, its serialized bytes are stored along with its Datastore {@link
* Key}. For a deleted entity, only its Datastore {@link Key} is stored, and the {@link
* #entityProtoBytes} field is left unset.
*
* <p>Storing raw bytes is motivated by two factors. First, since I/O is frequent and the Java
* objects are rarely needed in our target use case, storing raw bytes is the most efficient
* approach. More importantly, due to our data model and our customization of {@link
* google.registry.model.ofy.ObjectifyService ObjectifyService}, it is challenging to implement a
* serializer for Objectify entities that preserves the value of all properties. Without such
* serializers, Objectify entities cannot be used in a pipeline.
*
* <p>Objectify entities do not implement {@link Serializable}, serialization of such objects is as
* follows:
*
* <ul>
* <li>Convert an Objectify entity to a Datastore {@link Entity}: {@code
* auditedOfy().save().toEntity(..)}
* <li>Entity is serializable, but the more efficient approach is to convert an Entity to a
* ProtocolBuffer ({@link com.google.storage.onestore.v3.OnestoreEntity.EntityProto}) and then
* to raw bytes.
* </ul>
*
* <p>When the first conversion above is applied to an Objectify entity, a property value in the
* output may differ from the input in two situations:
*
* <ul>
* <li>If a property is of an assign-on-persist data type, e.g., {@link
* google.registry.model.UpdateAutoTimestamp}.
* <li>If it is related to CommitLog management, e.g., {@link google.registry.model.EppResource
* EppResource.revisions}.
* </ul>
*
* <p>Working around the side effects caused by our customization is difficult. Any solution would
* likely rely on Objectify's stack of context. However, many Objectify invocations in our code base
* are hardcoded to call the customized version of ObjectifyService, rendering Objectify's stack
* useless.
*
* <p>For now, this inability to use Objectify entities in pipelines is mostly a testing problem: we
* can not perform {@link org.apache.beam.sdk.testing.PAssert BEAM pipeline assertions} on Objectify
* entities. {@code InitSqlTestUtils.assertContainsExactlyElementsIn} is an example of a workaround.
*
* <p>Note that {@link Optional java.util.Optional} is not serializable, therefore cannot be used as
* property type in this class.
*/
@AutoValue
@DeleteAfterMigration
public abstract class VersionedEntity implements Serializable {

  private static final long serialVersionUID = 1L;

  // Commit timestamp in epoch milliseconds. (Property name preserves the historical
  // "Mills" spelling; renaming would change the AutoValue-generated API.)
  public abstract long commitTimeMills();

  /** The {@link Key} of the {@link Entity}. */
  public abstract Key key();

  /** Serialized form of the {@link Entity}. This property is {@code null} for a deleted Entity. */
  @Nullable
  abstract ImmutableBytes entityProtoBytes();

  /**
   * Deserializes and returns the wrapped {@link Entity}, or {@link Optional#empty()} if this
   * instance represents a deletion. Memoized so the proto bytes are decoded at most once.
   */
  @Memoized
  public Optional<Entity> getEntity() {
    return Optional.ofNullable(entityProtoBytes())
        .map(ImmutableBytes::getBytes)
        .map(EntityTranslator::createFromPbBytes);
  }

  /** Returns true if this instance represents a deleted entity (no serialized bytes stored). */
  public boolean isDelete() {
    return entityProtoBytes() == null;
  }

  /**
   * Converts deleted entity keys in {@code manifest} into a {@link Stream} of {@link
   * VersionedEntity VersionedEntities}. See {@link CommitLogImports#loadEntities} for more
   * information.
   */
  static Stream<VersionedEntity> fromManifest(CommitLogManifest manifest) {
    long commitTimeMillis = manifest.getCommitTime().getMillis();
    // Each deletion becomes a key-only VersionedEntity (entityProtoBytes left unset).
    return manifest.getDeletions().stream()
        .map(com.googlecode.objectify.Key::getRaw)
        .map(key -> newBuilder().commitTimeMills(commitTimeMillis).key(key).build());
  }

  /* Converts a {@link CommitLogMutation} to a {@link VersionedEntity}. */
  static VersionedEntity fromMutation(CommitLogMutation mutation) {
    // The mutation's parent key id carries the commit timestamp
    // (presumably set when the mutation was written — confirm against CommitLogMutation).
    return from(
        com.googlecode.objectify.Key.create(mutation).getParent().getId(),
        mutation.getEntityProtoBytes());
  }

  /**
   * Creates a {@link VersionedEntity} from serialized entity bytes and a commit timestamp.
   *
   * <p>The Datastore {@link Key} is recovered by deserializing the bytes.
   */
  public static VersionedEntity from(long commitTimeMillis, byte[] entityProtoBytes) {
    return newBuilder()
        .entityProtoBytes(entityProtoBytes)
        .key(EntityTranslator.createFromPbBytes(entityProtoBytes).getKey())
        .commitTimeMills(commitTimeMillis)
        .build();
  }

  private static Builder newBuilder() {
    return new AutoValue_VersionedEntity.Builder();
  }

  /** AutoValue builder for {@link VersionedEntity}. */
  @AutoValue.Builder
  public abstract static class Builder {

    public abstract Builder commitTimeMills(long commitTimeMillis);

    abstract Builder entityProtoBytes(ImmutableBytes bytes);

    public abstract Builder key(Key key);

    public abstract VersionedEntity build();

    // Convenience overload: wraps the raw bytes so callers never hand us a mutable array.
    Builder entityProtoBytes(byte[] bytes) {
      return entityProtoBytes(new ImmutableBytes(bytes));
    }
  }

  /**
   * Wraps a byte array and prevents it from being modified by its original owner.
   *
   * <p>While this class seems an overkill, it exists for two reasons:
   *
   * <ul>
   *   <li>It is easier to override the {@link #equals} method here (for value-equivalence check)
   *       than to override the AutoValue-generated {@code equals} method.
   *   <li>To appease the style checker, which forbids arrays as AutoValue property.
   * </ul>
   */
  static final class ImmutableBytes implements Serializable {

    private static final long serialVersionUID = 1L;

    private final byte[] bytes;

    // Defensive copy on construction so later caller-side mutation cannot leak in.
    ImmutableBytes(byte[] bytes) {
      this.bytes = Arrays.copyOf(bytes, bytes.length);
    }

    /**
     * Returns the saved byte array. Invocation is restricted to trusted callers, who must not
     * modify the array.
     */
    byte[] getBytes() {
      return bytes;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof ImmutableBytes)) {
        return false;
      }
      ImmutableBytes that = (ImmutableBytes) o;
      // Do not use Objects.equals, which checks reference identity instead of data in array.
      return Arrays.equals(bytes, that.bytes);
    }

    @Override
    public int hashCode() {
      // Do not use Objects.hashCode, which hashes the reference, not the data in array.
      return Arrays.hashCode(bytes);
    }
  }
}

View file

@ -1,16 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
@javax.annotation.ParametersAreNonnullByDefault
package google.registry.backup;

View file

@ -71,7 +71,7 @@ public final class AsyncTaskEnqueuer {
public AsyncTaskEnqueuer(
@Named(QUEUE_ASYNC_DELETE) Queue asyncDeletePullQueue,
@Named(QUEUE_ASYNC_HOST_RENAME) Queue asyncDnsRefreshPullQueue,
@Config("asyncDeleteFlowMapreduceDelay") Duration asyncDeleteDelay,
@Config("asyncDeleteDelay") Duration asyncDeleteDelay,
CloudTasksUtils cloudTasksUtils,
Retrier retrier) {
this.asyncDeletePullQueue = asyncDeletePullQueue;

View file

@ -21,6 +21,7 @@ import static google.registry.batch.AsyncTaskEnqueuer.PARAM_RESOURCE_KEY;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_ACTIONS;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_DELETE;
import static google.registry.batch.AsyncTaskEnqueuer.QUEUE_ASYNC_HOST_RENAME;
import static google.registry.request.RequestParameters.extractBooleanParameter;
import static google.registry.request.RequestParameters.extractIntParameter;
import static google.registry.request.RequestParameters.extractLongParameter;
import static google.registry.request.RequestParameters.extractOptionalBooleanParameter;
@ -45,6 +46,9 @@ import org.joda.time.DateTime;
@Module
public class BatchModule {
public static final String PARAM_DRY_RUN = "dryRun";
public static final String PARAM_FAST = "fast";
@Provides
@Parameter("jobName")
static Optional<String> provideJobName(HttpServletRequest req) {
@ -113,9 +117,15 @@ public class BatchModule {
}
@Provides
@Parameter(ResaveAllEppResourcesPipelineAction.PARAM_FAST)
static Optional<Boolean> provideIsFast(HttpServletRequest req) {
return extractOptionalBooleanParameter(req, ResaveAllEppResourcesPipelineAction.PARAM_FAST);
@Parameter(PARAM_FAST)
static boolean provideIsFast(HttpServletRequest req) {
return extractBooleanParameter(req, PARAM_FAST);
}
@Provides
@Parameter(PARAM_DRY_RUN)
static boolean provideIsDryRun(HttpServletRequest req) {
return extractBooleanParameter(req, PARAM_DRY_RUN);
}
@Provides

View file

@ -16,39 +16,30 @@ package google.registry.batch;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.batch.BatchModule.PARAM_DRY_RUN;
import static google.registry.config.RegistryEnvironment.PRODUCTION;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.mapreduce.inputs.EppResourceInputs.createEntityInput;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import static google.registry.request.Action.Method.POST;
import static google.registry.util.DateTimeUtils.END_OF_TIME;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryEnvironment;
import google.registry.flows.poll.PollFlowUtils;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.model.EppResource;
import google.registry.model.EppResourceUtils;
import google.registry.model.contact.ContactResource;
import google.registry.model.domain.DomainBase;
import google.registry.model.host.HostResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.model.poll.PollMessage;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.reporting.HistoryEntryDao;
import google.registry.persistence.VKey;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import java.util.List;
import javax.inject.Inject;
/**
@ -57,8 +48,8 @@ import javax.inject.Inject;
*
* <p>This only deletes contacts and hosts, NOT domains. To delete domains, use {@link
* DeleteProberDataAction} and pass it the TLD(s) that the load test domains were created on. Note
* that DeleteProberDataAction is safe enough to run in production whereas this mapreduce is not,
* but this one does not need to be runnable in production because load testing isn't run against
* that DeleteProberDataAction is safe enough to run in production whereas this action is not, but
* this one does not need to be runnable in production because load testing isn't run against
* production.
*/
@Action(
@ -79,48 +70,31 @@ public class DeleteLoadTestDataAction implements Runnable {
private static final ImmutableSet<String> LOAD_TEST_REGISTRARS = ImmutableSet.of("proxy");
private final boolean isDryRun;
private final MapreduceRunner mrRunner;
private final Response response;
private final Clock clock;
@Inject
DeleteLoadTestDataAction(
@Parameter(PARAM_DRY_RUN) boolean isDryRun,
MapreduceRunner mrRunner,
Response response,
Clock clock) {
this.isDryRun = isDryRun;
this.mrRunner = mrRunner;
this.response = response;
this.clock = clock;
}
@Override
public void run() {
// This mapreduce doesn't guarantee that foreign key relations are preserved, so isn't safe to
// This action doesn't guarantee that foreign key relations are preserved, so isn't safe to
// run on production. On other environments, data is fully wiped out occasionally anyway, so
// having some broken data that isn't referred to isn't the end of the world.
checkState(
!RegistryEnvironment.get().equals(PRODUCTION),
"This mapreduce is not safe to run on PRODUCTION.");
"This action is not safe to run on PRODUCTION.");
if (tm().isOfy()) {
mrRunner
.setJobName("Delete load test data")
.setModuleName("backend")
.runMapOnly(
new DeleteLoadTestDataMapper(isDryRun),
ImmutableList.of(
createEntityInput(ContactResource.class), createEntityInput(HostResource.class)))
.sendLinkToMapreduceConsole(response);
} else {
tm().transact(
() -> {
LOAD_TEST_REGISTRARS.forEach(this::deletePollMessages);
tm().loadAllOfStream(ContactResource.class).forEach(this::deleteContact);
tm().loadAllOfStream(HostResource.class).forEach(this::deleteHost);
});
}
tm().transact(
() -> {
LOAD_TEST_REGISTRARS.forEach(this::deletePollMessages);
tm().loadAllOfStream(ContactResource.class).forEach(this::deleteContact);
tm().loadAllOfStream(HostResource.class).forEach(this::deleteHost);
});
}
private void deletePollMessages(String registrarId) {
@ -184,54 +158,4 @@ public class DeleteLoadTestDataAction implements Runnable {
tm().delete(eppResource);
}
}
/** Provides the map method that runs for each existing contact and host entity. */
public static class DeleteLoadTestDataMapper extends Mapper<EppResource, Void, Void> {
private static final long serialVersionUID = -3817710674062432694L;
private final boolean isDryRun;
public DeleteLoadTestDataMapper(boolean isDryRun) {
this.isDryRun = isDryRun;
}
@Override
public final void map(EppResource resource) {
if (LOAD_TEST_REGISTRARS.contains(resource.getPersistedCurrentSponsorRegistrarId())) {
deleteResource(resource);
getContext()
.incrementCounter(
String.format("deleted %s entities", resource.getClass().getSimpleName()));
} else {
getContext().incrementCounter("skipped, not load test data");
}
}
private void deleteResource(EppResource resource) {
final Key<EppResourceIndex> eppIndex =
Key.create(EppResourceIndex.create(Key.create(resource)));
final Key<? extends ForeignKeyIndex<?>> fki = ForeignKeyIndex.createKey(resource);
int numEntitiesDeleted =
tm().transact(
() -> {
// This ancestor query selects all descendant entities.
List<Key<Object>> resourceAndDependentKeys =
auditedOfy().load().ancestor(resource).keys().list();
ImmutableSet<Key<?>> allKeys =
new ImmutableSet.Builder<Key<?>>()
.add(fki)
.add(eppIndex)
.addAll(resourceAndDependentKeys)
.build();
if (isDryRun) {
logger.atInfo().log("Would hard-delete the following entities: %s", allKeys);
} else {
auditedOfy().deleteWithoutBackup().keys(allKeys);
}
return allKeys.size();
});
getContext().incrementCounter("total entities deleted", numEntitiesDeleted);
}
}
}

View file

@ -17,45 +17,33 @@ package google.registry.batch;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.batch.BatchModule.PARAM_DRY_RUN;
import static google.registry.config.RegistryEnvironment.PRODUCTION;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.model.ResourceTransferUtils.updateForeignKeyIndexDeletionTime;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.model.reporting.HistoryEntry.Type.DOMAIN_DELETE;
import static google.registry.model.tld.Registries.getTldsOfType;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import static google.registry.request.Action.Method.POST;
import static google.registry.request.RequestParameters.PARAM_TLDS;
import static org.joda.time.DateTimeZone.UTC;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.config.RegistryConfig.Config;
import google.registry.config.RegistryEnvironment;
import google.registry.dns.DnsQueue;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.CreateAutoTimestamp;
import google.registry.model.EppResourceUtils;
import google.registry.model.domain.DomainBase;
import google.registry.model.domain.DomainHistory;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.model.tld.Registry;
import google.registry.model.tld.Registry.TldType;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import javax.inject.Inject;
import org.hibernate.CacheMode;
@ -94,8 +82,6 @@ public class DeleteProberDataAction implements Runnable {
*/
private static final Duration SOFT_DELETE_DELAY = Duration.standardHours(1);
private static final DnsQueue dnsQueue = DnsQueue.create();
// Domains to delete must:
// 1. Be in one of the prober TLDs
// 2. Not be a nic domain
@ -115,6 +101,8 @@ public class DeleteProberDataAction implements Runnable {
/** Number of domains to retrieve and delete per SQL transaction. */
private static final int BATCH_SIZE = 1000;
@Inject DnsQueue dnsQueue;
@Inject @Parameter(PARAM_DRY_RUN) boolean isDryRun;
/** List of TLDs to work on. If empty - will work on all TLDs that end with .test. */
@Inject @Parameter(PARAM_TLDS) ImmutableSet<String> tlds;
@ -123,7 +111,6 @@ public class DeleteProberDataAction implements Runnable {
@Config("registryAdminClientId")
String registryAdminRegistrarId;
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject DeleteProberDataAction() {}
@ -145,21 +132,7 @@ public class DeleteProberDataAction implements Runnable {
"If tlds are given, they must all exist and be TEST tlds. Given: %s, not found: %s",
tlds,
Sets.difference(tlds, deletableTlds));
ImmutableSet<String> proberRoidSuffixes =
deletableTlds.stream()
.map(tld -> Registry.get(tld).getRoidSuffix())
.collect(toImmutableSet());
if (tm().isOfy()) {
mrRunner
.setJobName("Delete prober data")
.setModuleName("backend")
.runMapOnly(
new DeleteProberDataMapper(proberRoidSuffixes, isDryRun, registryAdminRegistrarId),
ImmutableList.of(EppResourceInputs.createKeyInput(DomainBase.class)))
.sendLinkToMapreduceConsole(response);
} else {
runSqlJob(deletableTlds);
}
runSqlJob(deletableTlds);
}
private void runSqlJob(ImmutableSet<String> deletableTlds) {
@ -231,7 +204,7 @@ public class DeleteProberDataAction implements Runnable {
"Would soft-delete the active domain: %s (%s).",
domain.getDomainName(), domain.getRepoId());
} else {
softDeleteDomain(domain, registryAdminRegistrarId, dnsQueue);
softDeleteDomain(domain);
}
softDeletedDomains.incrementAndGet();
} else {
@ -280,8 +253,7 @@ public class DeleteProberDataAction implements Runnable {
}
// Take a DNS queue + admin registrar id as input so that it can be called from the mapper as well
private static void softDeleteDomain(
DomainBase domain, String registryAdminRegistrarId, DnsQueue localDnsQueue) {
private void softDeleteDomain(DomainBase domain) {
DomainBase deletedDomain =
domain.asBuilder().setDeletionTime(tm().getTransactionTime()).setStatusValues(null).build();
DomainHistory historyEntry =
@ -299,119 +271,6 @@ public class DeleteProberDataAction implements Runnable {
tm().putAllWithoutBackup(ImmutableList.of(deletedDomain, historyEntry));
// updating foreign keys is a no-op in SQL
updateForeignKeyIndexDeletionTime(deletedDomain);
localDnsQueue.addDomainRefreshTask(deletedDomain.getDomainName());
}
/** Provides the map method that runs for each existing DomainBase entity. */
public static class DeleteProberDataMapper extends Mapper<Key<DomainBase>, Void, Void> {
private static final DnsQueue dnsQueue = DnsQueue.create();
private static final long serialVersionUID = -7724537393697576369L;
private final ImmutableSet<String> proberRoidSuffixes;
private final Boolean isDryRun;
private final String registryAdminRegistrarId;
public DeleteProberDataMapper(
ImmutableSet<String> proberRoidSuffixes,
Boolean isDryRun,
String registryAdminRegistrarId) {
this.proberRoidSuffixes = proberRoidSuffixes;
this.isDryRun = isDryRun;
this.registryAdminRegistrarId = registryAdminRegistrarId;
}
@Override
public final void map(Key<DomainBase> key) {
try {
String roidSuffix = Iterables.getLast(Splitter.on('-').split(key.getName()));
if (proberRoidSuffixes.contains(roidSuffix)) {
deleteDomain(key);
} else {
getContext().incrementCounter("skipped, non-prober data");
}
} catch (Throwable t) {
logger.atSevere().withCause(t).log("Error while deleting prober data for key %s.", key);
getContext().incrementCounter(String.format("error, kind %s", key.getKind()));
}
}
private void deleteDomain(final Key<DomainBase> domainKey) {
final DomainBase domain = auditedOfy().load().key(domainKey).now();
DateTime now = DateTime.now(UTC);
if (domain == null) {
// Depending on how stale Datastore indexes are, we can get keys to resources that are
// already deleted (e.g. by a recent previous invocation of this mapreduce). So ignore them.
getContext().incrementCounter("already deleted");
return;
}
String domainName = domain.getDomainName();
if (domainName.equals("nic." + domain.getTld())) {
getContext().incrementCounter("skipped, NIC domain");
return;
}
if (now.isBefore(domain.getCreationTime().plus(DOMAIN_USED_DURATION))) {
getContext().incrementCounter("skipped, domain too new");
return;
}
if (!domain.getSubordinateHosts().isEmpty()) {
logger.atWarning().log(
"Cannot delete domain %s (%s) because it has subordinate hosts.",
domainName, domainKey);
getContext().incrementCounter("skipped, had subordinate host(s)");
return;
}
// If the domain is still active, that means that the prober encountered a failure and did not
// successfully soft-delete the domain (thus leaving its DNS entry published). We soft-delete
// it now so that the DNS entry can be handled. The domain will then be hard-deleted the next
// time the mapreduce is run.
if (EppResourceUtils.isActive(domain, now)) {
if (isDryRun) {
logger.atInfo().log(
"Would soft-delete the active domain: %s (%s).", domainName, domainKey);
} else {
tm().transact(() -> softDeleteDomain(domain, registryAdminRegistrarId, dnsQueue));
}
getContext().incrementCounter("domains soft-deleted");
return;
}
// If the domain isn't active, we want to make sure it hasn't been active for "a while" before
// deleting it. This prevents accidental double-map with the same key from immediately
// deleting active domains
if (now.isBefore(domain.getDeletionTime().plus(SOFT_DELETE_DELAY))) {
getContext().incrementCounter("skipped, domain too recently soft deleted");
return;
}
final Key<EppResourceIndex> eppIndex = Key.create(EppResourceIndex.create(domainKey));
final Key<? extends ForeignKeyIndex<?>> fki = ForeignKeyIndex.createKey(domain);
int entitiesDeleted =
tm().transact(
() -> {
// This ancestor query selects all descendant HistoryEntries, BillingEvents,
// PollMessages, and TLD-specific entities, as well as the domain itself.
List<Key<Object>> domainAndDependentKeys =
auditedOfy().load().ancestor(domainKey).keys().list();
ImmutableSet<Key<?>> allKeys =
new ImmutableSet.Builder<Key<?>>()
.add(fki)
.add(eppIndex)
.addAll(domainAndDependentKeys)
.build();
if (isDryRun) {
logger.atInfo().log("Would hard-delete the following entities: %s", allKeys);
} else {
auditedOfy().deleteWithoutBackup().keys(allKeys);
}
return allKeys.size();
});
getContext().incrementCounter("domains hard-deleted");
getContext().incrementCounter("total entities hard-deleted", entitiesDeleted);
}
dnsQueue.addDomainRefreshTask(deletedDomain.getDomainName());
}
}

View file

@ -18,11 +18,9 @@ import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Sets.difference;
import static com.google.common.collect.Sets.newHashSet;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.mapreduce.inputs.EppResourceInputs.createChildEntityInput;
import static google.registry.batch.BatchModule.PARAM_DRY_RUN;
import static google.registry.model.common.Cursor.CursorType.RECURRING_BILLING;
import static google.registry.model.domain.Period.Unit.YEARS;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.model.reporting.HistoryEntry.Type.DOMAIN_AUTORENEW;
import static google.registry.persistence.transaction.QueryComposer.Comparator.EQ;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
@ -34,19 +32,12 @@ import static google.registry.util.DateTimeUtils.START_OF_TIME;
import static google.registry.util.DateTimeUtils.earliestOf;
import static google.registry.util.DomainNameUtils.getTldFromDomainName;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
import google.registry.config.RegistryConfig.Config;
import google.registry.flows.domain.DomainPricingLogic;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.NullInput;
import google.registry.model.ImmutableObject;
import google.registry.model.billing.BillingEvent;
import google.registry.model.billing.BillingEvent.Flag;
@ -74,9 +65,9 @@ import org.joda.money.Money;
import org.joda.time.DateTime;
/**
* A mapreduce that expands {@link Recurring} billing events into synthetic {@link OneTime} events.
* An action that expands {@link Recurring} billing events into synthetic {@link OneTime} events.
*
* <p>The cursor used throughout this mapreduce (overridden if necessary using the parameter {@code
* <p>The cursor used throughout this action (overridden if necessary using the parameter {@code
* cursorTime}) represents the inclusive lower bound on the range of billing times that will be
* expanded as a result of the job (the exclusive upper bound being the execution time of the job).
*/
@ -87,11 +78,9 @@ import org.joda.time.DateTime;
public class ExpandRecurringBillingEventsAction implements Runnable {
public static final String PARAM_CURSOR_TIME = "cursorTime";
private static final String ERROR_COUNTER = "errors";
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
@Inject Clock clock;
@Inject MapreduceRunner mrRunner;
@Inject
@Config("jdbcBatchSize")
@ -100,7 +89,6 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
@Inject @Parameter(PARAM_DRY_RUN) boolean isDryRun;
@Inject @Parameter(PARAM_CURSOR_TIME) Optional<DateTime> cursorTimeParam;
@Inject DomainPricingLogic domainPricingLogic;
@Inject Response response;
@Inject ExpandRecurringBillingEventsAction() {}
@ -119,22 +107,7 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
logger.atInfo().log(
"Running Recurring billing event expansion for billing time range [%s, %s).",
cursorTime, executeTime);
if (tm().isOfy()) {
mrRunner
.setJobName("Expand Recurring billing events into synthetic OneTime events.")
.setModuleName("backend")
.runMapreduce(
new ExpandRecurringBillingEventsMapper(isDryRun, cursorTime, clock.nowUtc()),
new ExpandRecurringBillingEventsReducer(isDryRun, persistedCursorTime),
// Add an extra shard that maps over a null recurring event (see the mapper for why).
ImmutableList.of(
new NullInput<>(),
createChildEntityInput(
ImmutableSet.of(DomainBase.class), ImmutableSet.of(Recurring.class))))
.sendLinkToMapreduceConsole(response);
} else {
expandSqlBillingEventsInBatches(executeTime, cursorTime, persistedCursorTime);
}
expandSqlBillingEventsInBatches(executeTime, cursorTime, persistedCursorTime);
}
private void expandSqlBillingEventsInBatches(
@ -257,118 +230,13 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
}
}
/**
 * Mapper to expand {@link Recurring} billing events into synthetic {@link OneTime} events.
 *
 * <p>Receives each {@link Recurring} entity (plus a single null from the extra NullInput shard)
 * and, for the null case, emits one (cursorTime, executeTime) pair so the reducer runs exactly
 * once after the map phase and can advance the cursor.
 */
public static class ExpandRecurringBillingEventsMapper
extends Mapper<Recurring, DateTime, DateTime> {
private static final long serialVersionUID = 8376442755556228455L;
// When true, events are generated and counted but nothing is persisted.
private final boolean isDryRun;
// Lower bound of the billing-time window being expanded.
private final DateTime cursorTime;
// Upper bound of the window; the time at which the mapreduce was started.
private final DateTime executeTime;
public ExpandRecurringBillingEventsMapper(
boolean isDryRun, DateTime cursorTime, DateTime executeTime) {
this.isDryRun = isDryRun;
this.cursorTime = cursorTime;
this.executeTime = executeTime;
}
@Override
public final void map(final Recurring recurring) {
// This single emit forces the reducer to run at the end of the map job, so that a mapper
// that runs without error will advance the cursor at the end of processing (unless this was
// a dry run, in which case the cursor should not be advanced).
if (recurring == null) {
emit(cursorTime, executeTime);
return;
}
getContext().incrementCounter("Recurring billing events encountered");
// Ignore any recurring billing events that have yet to apply.
if (recurring.getEventTime().isAfter(executeTime)
// This second case occurs when a domain is transferred or deleted before first renewal.
|| recurring.getRecurrenceEndTime().isBefore(recurring.getEventTime())) {
getContext().incrementCounter("Recurring billing events ignored");
return;
}
int numBillingEventsSaved = 0;
try {
// Expand each Recurring in its own new transaction so one failure does not
// roll back work done for other entities.
numBillingEventsSaved =
tm().transactNew(
() -> expandBillingEvent(recurring, executeTime, cursorTime, isDryRun));
} catch (Throwable t) {
// Bump ERROR_COUNTER so the reducer knows to leave the cursor un-advanced,
// then rethrow with the offending Recurring id for diagnosis.
getContext().incrementCounter("error: " + t.getClass().getSimpleName());
getContext().incrementCounter(ERROR_COUNTER);
throw new RuntimeException(
String.format(
"Error while expanding Recurring billing events for %d", recurring.getId()),
t);
}
if (!isDryRun) {
getContext().incrementCounter("Saved OneTime billing events", numBillingEventsSaved);
} else {
getContext()
.incrementCounter("Generated OneTime billing events (dry run)", numBillingEventsSaved);
}
}
}
/**
 * "Reducer" to advance the cursor after all map jobs have been completed. The NullInput into the
 * mapper will cause the mapper to emit one timestamp pair (current cursor and execution time),
 * and the cursor will be advanced (and the timestamps logged) at the end of a successful
 * mapreduce.
 */
public static class ExpandRecurringBillingEventsReducer
extends Reducer<DateTime, DateTime, Void> {
// When true, log completion but never write the cursor.
private final boolean isDryRun;
// Cursor value observed before the mapreduce started; used to detect a concurrent update.
private final DateTime expectedPersistedCursorTime;
public ExpandRecurringBillingEventsReducer(
boolean isDryRun, DateTime expectedPersistedCursorTime) {
this.isDryRun = isDryRun;
this.expectedPersistedCursorTime = expectedPersistedCursorTime;
}
@Override
public void reduce(final DateTime cursorTime, final ReducerInput<DateTime> executionTimeInput) {
// If any mapper incremented ERROR_COUNTER, leave the cursor alone so the same
// billing-time window is retried on the next run.
if (getContext().getCounter(ERROR_COUNTER).getValue() > 0) {
logger.atSevere().log(
"One or more errors logged during recurring event expansion. Cursor will"
+ " not be advanced.");
return;
}
// Exactly one value was emitted (by the null-Recurring mapper shard).
final DateTime executionTime = executionTimeInput.next();
logger.atInfo().log(
"Recurring event expansion %s complete for billing event range [%s, %s).",
isDryRun ? "(dry run) " : "", cursorTime, executionTime);
tm().transact(
() -> {
// Re-read the cursor inside the transaction; treat a missing cursor as START_OF_TIME.
Cursor cursor =
auditedOfy().load().key(Cursor.createGlobalKey(RECURRING_BILLING)).now();
DateTime currentCursorTime =
(cursor == null ? START_OF_TIME : cursor.getCursorTime());
// Abort if something else moved the cursor while this mapreduce was running.
if (!currentCursorTime.equals(expectedPersistedCursorTime)) {
logger.atSevere().log(
"Current cursor position %s does not match expected cursor position %s.",
currentCursorTime, expectedPersistedCursorTime);
return;
}
if (!isDryRun) {
tm().put(Cursor.createGlobal(RECURRING_BILLING, executionTime));
}
});
}
}
private static int expandBillingEvent(
Recurring recurring, DateTime executeTime, DateTime cursorTime, boolean isDryRun) {
ImmutableSet.Builder<OneTime> syntheticOneTimesBuilder = new ImmutableSet.Builder<>();
final Registry tld = Registry.get(getTldFromDomainName(recurring.getTargetId()));
// Determine the complete set of times at which this recurring event should
// occur (up to and including the runtime of the mapreduce).
// occur (up to and including the runtime of the action).
Iterable<DateTime> eventTimes =
recurring
.getRecurrenceTimeOfYear()
@ -385,14 +253,10 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
VKey.create(
DomainBase.class, recurring.getDomainRepoId(), recurring.getParentKey().getParent());
Iterable<OneTime> oneTimesForDomain;
if (tm().isOfy()) {
oneTimesForDomain = auditedOfy().load().type(OneTime.class).ancestor(domainKey.getOfyKey());
} else {
oneTimesForDomain =
tm().createQueryComposer(OneTime.class)
.where("domainRepoId", EQ, recurring.getDomainRepoId())
.list();
}
oneTimesForDomain =
tm().createQueryComposer(OneTime.class)
.where("domainRepoId", EQ, recurring.getDomainRepoId())
.list();
// Determine the billing times that already have OneTime events persisted.
ImmutableSet<DateTime> existingBillingTimes =
@ -463,7 +327,7 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
/**
* Filters a set of {@link DateTime}s down to event times that are in scope for a particular
* mapreduce run, given the cursor time and the mapreduce execution time.
* action run, given the cursor time and the action execution time.
*/
protected static ImmutableSet<DateTime> getBillingTimesInScope(
Iterable<DateTime> eventTimes,

View file

@ -1,128 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.batch;
import static google.registry.mapreduce.MapreduceRunner.PARAM_FAST;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.common.collect.ImmutableList;
import com.googlecode.objectify.Key;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.model.EppResource;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.request.Action;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import javax.inject.Inject;
/**
 * A mapreduce that re-saves all EppResources, projecting them forward to the current time.
 *
 * <p>This is useful for completing data migrations on EppResource fields that are accomplished
 * with @OnSave or @OnLoad annotations, and also guarantees that all EppResources will get fresh
 * commit logs (for backup purposes). Additionally, pending actions such as transfers or grace
 * periods that are past their effective time will be resolved.
 *
 * <p>Because there are no auth settings in the {@link Action} annotation, this command can only be
 * run internally, or by pretending to be internal by setting the X-AppEngine-QueueName header,
 * which only admin users can do.
 *
 * <p>If the <code>?fast=true</code> querystring parameter is passed, then entities that are not
 * changed by {@link EppResource#cloneProjectedAtTime} will not be re-saved. This helps prevent
 * mutation load on the DB and has the beneficial side effect of writing out smaller commit logs.
 * Note that this does NOT pick up mutations caused by migrations using the {@link
 * com.googlecode.objectify.annotation.OnLoad} annotation, so if you are running a one-off schema
 * migration, do not use fast mode. Fast mode defaults to false for this reason, but is used by the
 * monthly invocation of the mapreduce.
 */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/task/resaveAllEppResources",
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
// No longer needed in SQL. Subject to future removal.
@Deprecated
@DeleteAfterMigration
public class ResaveAllEppResourcesAction implements Runnable {
@Inject MapreduceRunner mrRunner;
@Inject Response response;
// When true, skip re-saving entities unchanged by cloneProjectedAtTime (see class javadoc).
@Inject
@Parameter(PARAM_FAST)
boolean isFast;
@Inject
ResaveAllEppResourcesAction() {}
/**
 * The number of shards to run the map-only mapreduce on.
 *
 * <p>This is less than the default of 100 because we only run this action monthly and can afford
 * it being slower, but we don't want to write out lots of large commit logs in a short period of
 * time because they make the Cloud SQL migration tougher.
 */
private static final int NUM_SHARDS = 10;
@Override
public void run() {
// Map-only job over every EppResource key; no reduce phase is needed because each
// entity is handled independently inside the mapper's transaction.
mrRunner
.setJobName("Re-save all EPP resources")
.setModuleName("backend")
.setDefaultMapShards(NUM_SHARDS)
.runMapOnly(
new ResaveAllEppResourcesActionMapper(isFast),
ImmutableList.of(EppResourceInputs.createKeyInput(EppResource.class)))
.sendLinkToMapreduceConsole(response);
}
/** Mapper to re-save all EPP resources. */
public static class ResaveAllEppResourcesActionMapper
extends Mapper<Key<EppResource>, Void, Void> {
private static final long serialVersionUID = -7721628665138087001L;
private final boolean isFast;
ResaveAllEppResourcesActionMapper(boolean isFast) {
this.isFast = isFast;
}
@Override
public final void map(final Key<EppResource> resourceKey) {
// Load, project to the transaction time, and re-save — all atomically per entity.
boolean resaved =
tm().transact(
() -> {
EppResource originalResource = auditedOfy().load().key(resourceKey).now();
EppResource projectedResource =
originalResource.cloneProjectedAtTime(tm().getTransactionTime());
// In fast mode, skip the write entirely when projection changed nothing.
if (isFast && originalResource.equals(projectedResource)) {
return false;
} else {
auditedOfy().save().entity(projectedResource).now();
return true;
}
});
// Per-kind counters make the mapreduce console show how much work was real vs. skipped.
getContext()
.incrementCounter(
String.format(
"%s entities %s",
resourceKey.getKind(), resaved ? "re-saved" : "with no changes skipped"));
}
}
}

View file

@ -14,6 +14,7 @@
package google.registry.batch;
import static google.registry.batch.BatchModule.PARAM_FAST;
import static google.registry.beam.BeamUtils.createJobName;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import static javax.servlet.http.HttpServletResponse.SC_OK;
@ -32,7 +33,6 @@ import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.util.Clock;
import java.util.Optional;
import javax.inject.Inject;
/**
@ -63,8 +63,6 @@ public class ResaveAllEppResourcesPipelineAction implements Runnable {
static final String PATH = "/_dr/task/resaveAllEppResourcesPipeline";
static final String PIPELINE_NAME = "resave_all_epp_resources_pipeline";
public static final String PARAM_FAST = "fast";
private final String projectId;
private final String jobRegion;
private final String stagingBucketUrl;
@ -78,14 +76,14 @@ public class ResaveAllEppResourcesPipelineAction implements Runnable {
@Config("projectId") String projectId,
@Config("defaultJobRegion") String jobRegion,
@Config("beamStagingBucketUrl") String stagingBucketUrl,
@Parameter(PARAM_FAST) Optional<Boolean> fast,
@Parameter(PARAM_FAST) boolean fast,
Clock clock,
Response response,
Dataflow dataflow) {
this.projectId = projectId;
this.jobRegion = jobRegion;
this.stagingBucketUrl = stagingBucketUrl;
this.fast = fast.orElse(false);
this.fast = fast;
this.clock = clock;
this.response = response;
this.dataflow = dataflow;

View file

@ -249,9 +249,6 @@ public class RdeIO {
// Now that we're done, output roll the cursor forward.
if (key.manual()) {
logger.atInfo().log("Manual operation; not advancing cursor or enqueuing upload task.");
// Temporary measure to run RDE in beam in parallel with the daily MapReduce based RDE runs.
} else if (tm().isOfy()) {
logger.atInfo().log("Ofy is primary TM; not advancing cursor or enqueuing upload task.");
} else {
outputReceiver.output(KV.of(key, revision));
}

View file

@ -224,7 +224,7 @@ public final class RegistryConfig {
/**
* Returns the Google Cloud Storage bucket for storing zone files.
*
* @see google.registry.backup.ExportCommitLogDiffAction
* @see google.registry.tools.server.GenerateZoneFilesAction
*/
@Provides
@Config("zoneFilesBucket")
@ -232,22 +232,11 @@ public final class RegistryConfig {
return projectId + "-zonefiles";
}
/**
* Returns the Google Cloud Storage bucket for storing commit logs.
*
* @see google.registry.backup.ExportCommitLogDiffAction
*/
/** @see RegistryConfig#getDatabaseRetention() */
@Provides
@Config("commitLogGcsBucket")
public static String provideCommitLogGcsBucket(@Config("projectId") String projectId) {
return projectId + "-commits";
}
/** @see RegistryConfig#getCommitLogDatastoreRetention() */
@Provides
@Config("commitLogDatastoreRetention")
public static Duration provideCommitLogDatastoreRetention() {
return RegistryConfig.getCommitLogDatastoreRetention();
@Config("databaseRetention")
public static Duration provideDatabaseRetention() {
return RegistryConfig.getDatabaseRetention();
}
/**
@ -261,18 +250,6 @@ public final class RegistryConfig {
return projectId + "-domain-lists";
}
/**
* Batch size for the number of transactions' worth of commit log data to process at once when
* exporting a commit log diff.
*
* @see google.registry.backup.ExportCommitLogDiffAction
*/
@Provides
@Config("commitLogDiffExportBatchSize")
public static int provideCommitLogDiffExportBatchSize() {
return 100;
}
/**
* Returns the Google Cloud Storage bucket for staging BRDA escrow deposits.
*
@ -764,17 +741,6 @@ public final class RegistryConfig {
return config.rde.reportUrlPrefix;
}
/**
* Maximum amount of time generating an escrow deposit for a TLD could take, before killing.
*
* @see google.registry.rde.RdeStagingReducer
*/
@Provides
@Config("rdeStagingLockTimeout")
public static Duration provideRdeStagingLockTimeout() {
return Duration.standardHours(2);
}
/**
* Maximum amount of time it should ever take to upload an escrow deposit, before killing.
*
@ -791,7 +757,7 @@ public final class RegistryConfig {
*
* <p>This value was communicated to us by the escrow provider.
*
* @see google.registry.rde.RdeStagingReducer
* @see google.registry.rde.RdeUploadAction
*/
@Provides
@Config("rdeUploadSftpCooldown")
@ -1087,8 +1053,8 @@ public final class RegistryConfig {
* @see google.registry.batch.AsyncTaskEnqueuer
*/
@Provides
@Config("asyncDeleteFlowMapreduceDelay")
public static Duration provideAsyncDeleteFlowMapreduceDelay(RegistryConfigSettings config) {
@Config("asyncDeleteDelay")
public static Duration provideAsyncDeleteDelay(RegistryConfigSettings config) {
return Duration.standardSeconds(config.misc.asyncDeleteDelaySeconds);
}
@ -1372,33 +1338,15 @@ public final class RegistryConfig {
return "gs://" + getProjectId() + "-datastore-backups";
}
/**
* Number of sharded commit log buckets.
*
* <p>This number is crucial for determining how much transactional throughput the system can
* allow, because it determines how many entity groups are available for writing commit logs.
* Since entity groups have a one transaction per second SLA (which is actually like ten in
* practice), a registry that wants to be able to handle one hundred transactions per second
* should have one hundred buckets.
*
* <p><b>Warning:</b> This can be raised but never lowered.
*
* @see google.registry.model.ofy.CommitLogBucket
*/
public static int getCommitLogBucketCount() {
return CONFIG_SETTINGS.get().datastore.commitLogBucketsNum;
}
/**
* Returns the length of time before commit logs should be deleted from Datastore.
*
* <p>The only reason you'll want to retain this commit logs in Datastore is for performing
* point-in-time restoration queries for subsystems like RDE.
*
* @see google.registry.backup.DeleteOldCommitLogsAction
* @see google.registry.model.translators.CommitLogRevisionsTranslatorFactory
* @see google.registry.tools.server.GenerateZoneFilesAction
*/
public static Duration getCommitLogDatastoreRetention() {
public static Duration getDatabaseRetention() {
return Duration.standardDays(30);
}

View file

@ -107,7 +107,6 @@ public class RegistryConfigSettings {
/** Configuration for Cloud Datastore. */
public static class Datastore {
public int commitLogBucketsNum;
public int eppResourceIndexBucketsNum;
public int baseOfyRetryMillis;
}

View file

@ -183,10 +183,6 @@ registryPolicy:
requireSslCertificates: true
datastore:
# Number of commit log buckets in Datastore. Lowering this after initial
# install risks losing up to a days' worth of differential backups.
commitLogBucketsNum: 397
# Number of EPP resource index buckets in Datastore. Dont change after
# initial install.
eppResourceIndexBucketsNum: 997

View file

@ -11,7 +11,6 @@ registryPolicy:
Line 2 is this 1.
datastore:
commitLogBucketsNum: 3
eppResourceIndexBucketsNum: 3
baseOfyRetryMillis: 0

View file

@ -1,56 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.cron;
import com.google.common.collect.ImmutableMultimap;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.request.Action;
import google.registry.request.Action.Service;
import google.registry.request.Parameter;
import google.registry.request.auth.Auth;
import google.registry.util.CloudTasksUtils;
import java.util.Optional;
import javax.inject.Inject;
/**
 * Action for fanning out cron tasks for each commit log bucket.
 *
 * <p>Enqueues one POST task per bucket id returned by {@link CommitLogBucket#getBucketIds},
 * targeting the injected endpoint on the given queue.
 */
@Action(
service = Action.Service.BACKEND,
path = "/_dr/cron/commitLogFanout",
automaticallyPrintOk = true,
auth = Auth.AUTH_INTERNAL_OR_ADMIN)
public final class CommitLogFanoutAction implements Runnable {
// Query parameter name carrying the bucket id on each enqueued task.
public static final String BUCKET_PARAM = "bucket";
@Inject CloudTasksUtils cloudTasksUtils;
// Relative URL each fanned-out task should POST to.
@Inject @Parameter("endpoint") String endpoint;
// Name of the task queue to enqueue into.
@Inject @Parameter("queue") String queue;
// Optional jitter bound passed to createPostTaskWithJitter — presumably a randomized
// delay to spread the per-bucket tasks out; confirm against CloudTasksUtils.
@Inject @Parameter("jitterSeconds") Optional<Integer> jitterSeconds;
@Inject CommitLogFanoutAction() {}
@Override
public void run() {
// One task per commit log bucket, each carrying its bucket id as a POST param.
for (int bucketId : CommitLogBucket.getBucketIds()) {
cloudTasksUtils.enqueue(
queue,
cloudTasksUtils.createPostTaskWithJitter(
endpoint,
Service.BACKEND.toString(),
ImmutableMultimap.of(BUCKET_PARAM, Integer.toString(bucketId)),
jitterSeconds));
}
}
}

View file

@ -41,7 +41,6 @@ import google.registry.dns.DnsConstants.TargetType;
import google.registry.model.tld.Registries;
import google.registry.util.Clock;
import google.registry.util.NonFinalForTesting;
import google.registry.util.SystemClock;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
@ -83,17 +82,6 @@ public class DnsQueue {
this.clock = clock;
}
/**
* Constructs a new instance.
*
* <p><b>Note:</b> Prefer <code>@Inject</code>ing DnsQueue instances instead. You should only use
* this helper method in situations for which injection does not work, e.g. inside mapper or
* reducer classes in mapreduces that need to be Serializable.
*/
public static DnsQueue create() {
return new DnsQueue(getQueue(DNS_PULL_QUEUE_NAME), new SystemClock());
}
@VisibleForTesting
public static DnsQueue createForTesting(Clock clock) {
return new DnsQueue(getQueue(DNS_PULL_QUEUE_NAME), clock);

View file

@ -83,7 +83,6 @@
<url><![CDATA[/_dr/task/resaveAllEppResourcesPipeline?fast=true]]></url>
<description>
This job resaves all our resources, projected in time to "now".
It is needed for "deleteOldCommitLogs" to work correctly.
</description>
<schedule>1st monday of month 09:00</schedule>
<target>backend</target>
@ -92,9 +91,9 @@
<cron>
<url><![CDATA[/_dr/task/expandRecurringBillingEvents]]></url>
<description>
This job runs a mapreduce that creates synthetic OneTime billing events from Recurring billing
This job runs an action that creates synthetic OneTime billing events from Recurring billing
events. Events are created for all instances of Recurring billing events that should exist
between the RECURRING_BILLING cursor's time and the execution time of the mapreduce.
between the RECURRING_BILLING cursor's time and the execution time of the action.
</description>
<schedule>every day 03:00</schedule>
<target>backend</target>

View file

@ -205,48 +205,6 @@
<url-pattern>/_dr/cron/fanout</url-pattern>
</servlet-mapping>
<!-- Backups. -->
<!-- Fans out a cron task over all commit log buckets. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/commitLogFanout</url-pattern>
</servlet-mapping>
<!-- Deletes old commit logs from Datastore. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteOldCommitLogs</url-pattern>
</servlet-mapping>
<!-- Checkpoints commit logs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/cron/commitLogCheckpoint</url-pattern>
</servlet-mapping>
<!-- Exports commit log diffs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/exportCommitLogDiff</url-pattern>
</servlet-mapping>
<!-- Deletes commit logs. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/killCommitLogs</url-pattern>
</servlet-mapping>
<!-- MapReduce servlet. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>
@ -286,24 +244,18 @@
<url-pattern>/_dr/task/exportDomainLists</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all prober data. -->
<!-- Action to delete all prober data. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteProberData</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete load test data. -->
<!-- Action to delete load test data. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/deleteLoadTestData</url-pattern>
</servlet-mapping>
<!-- Mapreduce to re-save all EppResources. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/resaveAllEppResources</url-pattern>
</servlet-mapping>
<!-- Dataflow pipeline to re-save all EPP resources. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
@ -334,7 +286,7 @@
<url-pattern>/_dr/task/refreshDnsOnHostRename</url-pattern>
</servlet-mapping>
<!-- Mapreduce to expand recurring billing events into OneTimes. -->
<!-- Action to expand recurring billing events into OneTimes. -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/expandRecurringBillingEvents</url-pattern>
@ -352,30 +304,6 @@
<url-pattern>/_dr/task/sendExpiringCertificateNotificationEmail</url-pattern>
</servlet-mapping>
<!-- Mapreduce to import contacts from escrow file -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/importRdeContacts</url-pattern>
</servlet-mapping>
<!-- Mapreduce to import hosts from escrow file -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/importRdeHosts</url-pattern>
</servlet-mapping>
<!-- Mapreduce to import domains from escrow file -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/importRdeDomains</url-pattern>
</servlet-mapping>
<!-- Mapreduce to link hosts from escrow file to superordinate domains -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>
<url-pattern>/_dr/task/linkRdeHosts</url-pattern>
</servlet-mapping>
<!-- Action to automatically re-lock a domain after unlocking it -->
<servlet-mapping>
<servlet-name>backend-servlet</servlet-name>

View file

@ -66,40 +66,6 @@
<url-pattern>/_dr/epptool</url-pattern>
</servlet-mapping>
<!-- Mapreduce to re-save all HistoryEntries. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/resaveAllHistoryEntries</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete EppResources, children, and indices. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/killAllEppResources</url-pattern>
</servlet-mapping>
<!-- Mapreduce to delete all commit logs. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/killAllCommitLogs</url-pattern>
</servlet-mapping>
<!-- Restores commit logs. -->
<servlet-mapping>
<servlet-name>tools-servlet</servlet-name>
<url-pattern>/_dr/task/restoreCommitLogs</url-pattern>
</servlet-mapping>
<!-- This path serves up the App Engine results page for mapreduce runs. -->
<servlet>
<servlet-name>mapreduce</servlet-name>
<servlet-class>com.google.appengine.tools.mapreduce.MapReduceServlet</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>mapreduce</servlet-name>
<url-pattern>/_dr/mapreduce/*</url-pattern>
</servlet-mapping>
<!-- Pipeline GUI servlets. -->
<servlet>
<servlet-name>pipeline</servlet-name>

View file

@ -106,7 +106,6 @@
<url><![CDATA[/_dr/task/resaveAllEppResourcesPipeline?fast=true]]></url>
<description>
This job resaves all our resources, projected in time to "now".
It is needed for "deleteOldCommitLogs" to work correctly.
</description>
<schedule>1st monday of month 09:00</schedule>
<target>backend</target>
@ -133,9 +132,9 @@
<cron>
<url><![CDATA[/_dr/task/expandRecurringBillingEvents]]></url>
<description>
This job runs a mapreduce that creates synthetic OneTime billing events from Recurring billing
This job runs an action that creates synthetic OneTime billing events from Recurring billing
events. Events are created for all instances of Recurring billing events that should exist
between the RECURRING_BILLING cursor's time and the execution time of the mapreduce.
between the RECURRING_BILLING cursor's time and the execution time of the action.
</description>
<schedule>every day 03:00</schedule>
<target>backend</target>

View file

@ -35,7 +35,6 @@
<url><![CDATA[/_dr/task/resaveAllEppResourcesPipeline?fast=true]]></url>
<description>
This job resaves all our resources, projected in time to "now".
It is needed for "deleteOldCommitLogs" to work correctly.
</description>
<schedule>1st monday of month 09:00</schedule>
<target>backend</target>

View file

@ -90,7 +90,6 @@
<url><![CDATA[/_dr/task/resaveAllEppResourcesPipeline?fast=true]]></url>
<description>
This job resaves all our resources, projected in time to "now".
It is needed for "deleteOldCommitLogs" to work correctly.
</description>
<schedule>1st monday of month 09:00</schedule>
<target>backend</target>
@ -108,9 +107,9 @@
<cron>
<url><![CDATA[/_dr/task/expandRecurringBillingEvents]]></url>
<description>
This job runs a mapreduce that creates synthetic OneTime billing events from Recurring billing
This job runs an action that creates synthetic OneTime billing events from Recurring billing
events. Events are created for all instances of Recurring billing events that should exist
between the RECURRING_BILLING cursor's time and the execution time of the mapreduce.
between the RECURRING_BILLING cursor's time and the execution time of the action.
</description>
<schedule>every day 03:00</schedule>
<target>backend</target>

View file

@ -15,49 +15,33 @@
package google.registry.export;
import static com.google.common.base.Verify.verifyNotNull;
import static google.registry.mapreduce.inputs.EppResourceInputs.createEntityInput;
import static google.registry.model.EppResourceUtils.isActive;
import static google.registry.model.tld.Registries.getTldsOfType;
import static google.registry.persistence.transaction.TransactionManagerFactory.jpaTm;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import static google.registry.request.Action.Method.POST;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.ReducerInput;
import com.google.cloud.storage.BlobId;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.google.common.net.MediaType;
import google.registry.config.RegistryConfig.Config;
import google.registry.gcs.GcsUtils;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.model.domain.DomainBase;
import google.registry.model.tld.Registry;
import google.registry.model.tld.Registry.TldType;
import google.registry.request.Action;
import google.registry.request.Response;
import google.registry.request.auth.Auth;
import google.registry.storage.drive.DriveConnection;
import google.registry.util.Clock;
import google.registry.util.NonFinalForTesting;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.List;
import java.util.function.Supplier;
import javax.inject.Inject;
import org.joda.time.DateTime;
/**
* A mapreduce that exports the list of active domains on all real TLDs to Google Drive and GCS.
* An action that exports the list of active domains on all real TLDs to Google Drive and GCS.
*
* <p>Each TLD's active domain names are exported as a newline-delimited flat text file with the
* name TLD.txt into the domain-lists bucket. Note that this overwrites the files in place.
@ -70,11 +54,8 @@ import org.joda.time.DateTime;
public class ExportDomainListsAction implements Runnable {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final int MAX_NUM_REDUCE_SHARDS = 100;
public static final String REGISTERED_DOMAINS_FILENAME = "registered_domains.txt";
@Inject MapreduceRunner mrRunner;
@Inject Response response;
@Inject Clock clock;
@Inject DriveConnection driveConnection;
@Inject GcsUtils gcsUtils;
@ -86,56 +67,44 @@ public class ExportDomainListsAction implements Runnable {
public void run() {
ImmutableSet<String> realTlds = getTldsOfType(TldType.REAL);
logger.atInfo().log("Exporting domain lists for TLDs %s.", realTlds);
if (tm().isOfy()) {
mrRunner
.setJobName("Export domain lists")
.setModuleName("backend")
.setDefaultReduceShards(Math.min(realTlds.size(), MAX_NUM_REDUCE_SHARDS))
.runMapreduce(
new ExportDomainListsMapper(clock.nowUtc(), realTlds),
new ExportDomainListsReducer(gcsBucket, gcsUtils),
ImmutableList.of(createEntityInput(DomainBase.class)))
.sendLinkToMapreduceConsole(response);
} else {
realTlds.forEach(
tld -> {
List<String> domains =
tm().transact(
() ->
// Note that if we had "creationTime <= :now" in the condition (not
// necessary as there is no pending creation, the order of deletionTime
// and creationTime in the query would have been significant and it
// should come after deletionTime. When Hibernate substitutes "now" it
// will first validate that the **first** field that is to be compared
// with it (deletionTime) is assignable from the substituted Java object
// (clock.nowUtc()). Since creationTime is a CreateAutoTimestamp, if it
// comes first, we will need to substitute "now" with
// CreateAutoTimestamp.create(clock.nowUtc()). This might look a bit
// strange as the Java object type is clearly incompatible between the
// two fields deletionTime (DateTime) and creationTime, yet they are
// compared with the same "now". It is actually OK because in the end
// Hibernate converts everything to SQL types (and Java field names to
// SQL column names) to run the query. Both CreateAutoTimestamp and
// DateTime are persisted as timestamp_z in SQL. It is only the
// validation that compares the Java types, and only with the first
// field that compares with the substituted value.
jpaTm()
.query(
"SELECT fullyQualifiedDomainName FROM Domain "
+ "WHERE tld = :tld "
+ "AND deletionTime > :now "
+ "ORDER by fullyQualifiedDomainName ASC",
String.class)
.setParameter("tld", tld)
.setParameter("now", clock.nowUtc())
.getResultList());
String domainsList = Joiner.on("\n").join(domains);
logger.atInfo().log(
"Exporting %d domains for TLD %s to GCS and Drive.", domains.size(), tld);
exportToGcs(tld, domainsList, gcsBucket, gcsUtils);
exportToDrive(tld, domainsList, driveConnection);
});
}
realTlds.forEach(
tld -> {
List<String> domains =
tm().transact(
() ->
// Note that if we had "creationTime <= :now" in the condition (not
// necessary as there is no pending creation), the order of deletionTime
// and creationTime in the query would have been significant and it
// should come after deletionTime. When Hibernate substitutes "now" it
// will first validate that the **first** field that is to be compared
// with it (deletionTime) is assignable from the substituted Java object
// (clock.nowUtc()). Since creationTime is a CreateAutoTimestamp, if it
// comes first, we will need to substitute "now" with
// CreateAutoTimestamp.create(clock.nowUtc()). This might look a bit
// strange as the Java object type is clearly incompatible between the
// two fields deletionTime (DateTime) and creationTime, yet they are
// compared with the same "now". It is actually OK because in the end
// Hibernate converts everything to SQL types (and Java field names to
// SQL column names) to run the query. Both CreateAutoTimestamp and
// DateTime are persisted as timestamp_z in SQL. It is only the
// validation that compares the Java types, and only with the first
// field that compares with the substituted value.
jpaTm()
.query(
"SELECT fullyQualifiedDomainName FROM Domain "
+ "WHERE tld = :tld "
+ "AND deletionTime > :now "
+ "ORDER by fullyQualifiedDomainName ASC",
String.class)
.setParameter("tld", tld)
.setParameter("now", clock.nowUtc())
.getResultList());
String domainsList = Joiner.on("\n").join(domains);
logger.atInfo().log(
"Exporting %d domains for TLD %s to GCS and Drive.", domains.size(), tld);
exportToGcs(tld, domainsList, gcsBucket, gcsUtils);
exportToDrive(tld, domainsList, driveConnection);
});
}
protected static boolean exportToDrive(
@ -178,80 +147,4 @@ public class ExportDomainListsAction implements Runnable {
}
return true;
}
/** Mapper that emits a (tld, domainName) pair for every active domain in a real TLD. */
static class ExportDomainListsMapper extends Mapper<DomainBase, String, String> {
  private static final long serialVersionUID = -7312206212434039854L;

  // Snapshot of the export time and the set of TLDs to consider, captured at job-creation time
  // and serialized into the mapreduce.
  private final DateTime exportTime;
  private final ImmutableSet<String> realTlds;

  ExportDomainListsMapper(DateTime exportTime, ImmutableSet<String> realTlds) {
    this.exportTime = exportTime;
    this.realTlds = realTlds;
  }

  @Override
  public void map(DomainBase domain) {
    // Guard clause: ignore domains outside the real TLDs or not active as of the export time.
    if (!realTlds.contains(domain.getTld()) || !isActive(domain, exportTime)) {
      return;
    }
    emit(domain.getTld(), domain.getDomainName());
    getContext().incrementCounter(String.format("domains in tld %s", domain.getTld()));
  }
}
/**
 * Reducer that collects the domain names emitted for a single TLD and writes the resulting list
 * out to both GCS and Drive, counting successes and failures for each destination.
 */
static class ExportDomainListsReducer extends Reducer<String, String, Void> {
  private static final long serialVersionUID = 7035260977259119087L;

  /** Allows overriding the default {@link DriveConnection} in tests. */
  @NonFinalForTesting
  private static Supplier<DriveConnection> driveConnectionSupplier =
      Suppliers.memoize(() -> DaggerDriveModule_DriveComponent.create().driveConnection());

  // Destination GCS bucket name and the GCS helper; both are carried through serialization
  // (neither is transient) into the pipeline workers.
  private final String gcsBucket;
  private final GcsUtils gcsUtils;

  /**
   * Non-serializable {@link DriveConnection} that will be created when an instance of {@link
   * ExportDomainListsReducer} is deserialized in a MR pipeline worker.
   *
   * <p>See {@link #readObject(ObjectInputStream)}.
   */
  private transient DriveConnection driveConnection;

  public ExportDomainListsReducer(String gcsBucket, GcsUtils gcsUtils) {
    this.gcsBucket = gcsBucket;
    this.gcsUtils = gcsUtils;
  }

  // Rebuilds the transient DriveConnection every time an instance is deserialized on a worker.
  @SuppressWarnings("unused")
  private void readObject(ObjectInputStream is) throws IOException, ClassNotFoundException {
    is.defaultReadObject();
    driveConnection = driveConnectionSupplier.get();
  }

  @Override
  public void reduce(String tld, ReducerInput<String> fqdns) {
    // Sort the incoming names so the exported file is deterministic and human-scannable.
    ImmutableList<String> domains = ImmutableList.sortedCopyOf(() -> fqdns);
    String domainsList = Joiner.on('\n').join(domains);
    logger.atInfo().log("Exporting %d domains for TLD %s to GCS and Drive.", domains.size(), tld);
    // NOTE(review): the first counter name reads "successful written"; it is a runtime string
    // (a counter key), so it is deliberately left untouched here.
    if (exportToGcs(tld, domainsList, gcsBucket, gcsUtils)) {
      getContext().incrementCounter("domain lists successful written out to GCS");
    } else {
      getContext().incrementCounter("domain lists failed to write out to GCS");
    }
    if (exportToDrive(tld, domainsList, driveConnection)) {
      getContext().incrementCounter("domain lists successfully written out to Drive");
    } else {
      getContext().incrementCounter("domain lists failed to write out to Drive");
    }
  }

  /** Replaces the {@link DriveConnection} supplier used by deserialized instances, for tests. */
  @VisibleForTesting
  static void setDriveConnectionForTesting(
      Supplier<DriveConnection> testDriveConnectionSupplier) {
    driveConnectionSupplier = testDriveConnectionSupplier;
  }
}
}

View file

@ -84,5 +84,11 @@ public class EppToolAction implements Runnable {
static String provideClientId(HttpServletRequest req) {
return extractRequiredParameter(req, "clientId");
}
/** Dagger provider for the boolean {@code dryRun} request parameter. */
@Provides
@Parameter("dryRun")
static boolean provideIsDryRun(HttpServletRequest req) {
  return extractBooleanParameter(req, "dryRun");
}
}
}

View file

@ -72,8 +72,7 @@ public final class HostDeleteFlow implements TransactionalFlow {
StatusValue.PENDING_DELETE,
StatusValue.SERVER_DELETE_PROHIBITED);
private static final DnsQueue dnsQueue = DnsQueue.create();
@Inject DnsQueue dnsQueue;
@Inject ExtensionManager extensionManager;
@Inject @RegistrarId String registrarId;
@Inject @TargetId String targetId;

View file

@ -44,7 +44,7 @@ import javax.inject.Inject;
/**
* Utilities for working with Google Cloud Storage.
*
* <p>It is {@link Serializable} so that it can be used in MapReduce or Beam.
* <p>It is {@link Serializable} so that it can be used in Beam.
*/
public class GcsUtils implements Serializable {

View file

@ -1,57 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce;
import static google.registry.mapreduce.MapreduceRunner.PARAM_DRY_RUN;
import static google.registry.mapreduce.MapreduceRunner.PARAM_FAST;
import static google.registry.mapreduce.MapreduceRunner.PARAM_MAP_SHARDS;
import static google.registry.mapreduce.MapreduceRunner.PARAM_REDUCE_SHARDS;
import static google.registry.request.RequestParameters.extractBooleanParameter;
import static google.registry.request.RequestParameters.extractOptionalIntParameter;
import dagger.Module;
import dagger.Provides;
import google.registry.request.Parameter;
import java.util.Optional;
import javax.servlet.http.HttpServletRequest;
/** Dagger module supplying the HTTP request parameters understood by the mapreduce framework. */
@Module
public final class MapreduceModule {

  /** Supplies the optional override for the number of map shards. */
  @Provides
  @Parameter(PARAM_MAP_SHARDS)
  static Optional<Integer> provideMapShards(HttpServletRequest req) {
    return extractOptionalIntParameter(req, PARAM_MAP_SHARDS);
  }

  /** Supplies the optional override for the number of reduce shards. */
  @Provides
  @Parameter(PARAM_REDUCE_SHARDS)
  static Optional<Integer> provideReduceShards(HttpServletRequest req) {
    return extractOptionalIntParameter(req, PARAM_REDUCE_SHARDS);
  }

  /** Supplies the boolean dry-run flag. */
  @Provides
  @Parameter(PARAM_DRY_RUN)
  static boolean provideIsDryRun(HttpServletRequest req) {
    return extractBooleanParameter(req, PARAM_DRY_RUN);
  }

  /** Supplies the boolean "fast" flag. */
  @Provides
  @Parameter(PARAM_FAST)
  static boolean provideIsFast(HttpServletRequest req) {
    return extractBooleanParameter(req, PARAM_FAST);
  }
}

View file

@ -1,315 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce;
import static com.google.appengine.tools.pipeline.PipelineServiceFactory.newPipelineService;
import static com.google.common.base.Preconditions.checkNotNull;
import static google.registry.util.PreconditionsUtils.checkArgumentNotNull;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.MapJob;
import com.google.appengine.tools.mapreduce.MapReduceJob;
import com.google.appengine.tools.mapreduce.MapReduceSettings;
import com.google.appengine.tools.mapreduce.MapReduceSpecification;
import com.google.appengine.tools.mapreduce.MapSettings;
import com.google.appengine.tools.mapreduce.MapSpecification;
import com.google.appengine.tools.mapreduce.Mapper;
import com.google.appengine.tools.mapreduce.Marshallers;
import com.google.appengine.tools.mapreduce.Output;
import com.google.appengine.tools.mapreduce.Reducer;
import com.google.appengine.tools.mapreduce.outputs.NoOutput;
import com.google.appengine.tools.pipeline.Job0;
import com.google.appengine.tools.pipeline.JobSetting;
import com.google.common.flogger.FluentLogger;
import google.registry.mapreduce.inputs.ConcatenatingInput;
import google.registry.request.Parameter;
import google.registry.request.Response;
import google.registry.util.AppEngineServiceUtils;
import java.io.Serializable;
import java.util.Optional;
import javax.inject.Inject;
import org.joda.time.Duration;
/**
 * Runner for map-only or full map and reduce mapreduces.
 *
 * <p>We use hardcoded serialization marshallers for moving data between steps, so all types used as
 * keys or values must implement {@link Serializable}.
 */
public class MapreduceRunner {

  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  // Names of the HTTP parameters that actions invoking a mapreduce may pass through.
  public static final String PARAM_DRY_RUN = "dryRun";
  public static final String PARAM_MAP_SHARDS = "mapShards";
  public static final String PARAM_REDUCE_SHARDS = "reduceShards";
  public static final String PARAM_FAST = "fast";

  // Base URL and task queue consumed by the underlying App Engine mapreduce framework.
  private static final String BASE_URL = "/_dr/mapreduce/";
  private static final String QUEUE_NAME = "mapreduce";

  private static final String MAPREDUCE_CONSOLE_LINK_FORMAT =
      "Mapreduce console: https://%s/_ah/pipeline/status.html?root=%s";

  // Shard counts supplied via HTTP parameters; empty means "fall back to the defaults below".
  private final Optional<Integer> httpParamMapShards;
  private final Optional<Integer> httpParamReduceShards;
  private final AppEngineServiceUtils appEngineServiceUtils;

  // Default to 3 minutes since many slices will contain Datastore queries that time out at 4:30.
  private Duration sliceDuration = Duration.standardMinutes(3);

  // Required builder-style settings; presence is enforced by checkCommonRequiredFields().
  private String jobName;
  private String moduleName;

  // Defaults for number of mappers/reducers if not specified in HTTP params. The max allowable
  // count for both (which is specified in the App Engine mapreduce framework) is 1000. We use 100
  // mapper shards because there's a bottleneck in the App Engine mapreduce framework caused by
  // updating the mapreduce status on a single Datastore entity (which only supports so many writes
  // per second). The existing mapreduces don't actually do that much work for TLDs that aren't
  // .com-sized, so the shards finish so quickly that contention becomes a problem. This number can
  // always be tuned up for large registry systems with on the order of hundreds of thousands of
  // entities on up.
  // The default reducer shard count is one because most mapreduces use it to collate and output
  // results. The ones that actually perform a substantial amount of work in a reduce step use a
  // higher non-default number of reducer shards.
  private int defaultMapShards = 100;
  private int defaultReduceShards = 1;

  /**
   * @param mapShards number of map shards; if omitted, the {@link Input} objects will choose
   * @param reduceShards number of reduce shards; if omitted, uses {@link #defaultReduceShards}
   */
  @Inject
  public MapreduceRunner(
      @Parameter(PARAM_MAP_SHARDS) Optional<Integer> mapShards,
      @Parameter(PARAM_REDUCE_SHARDS) Optional<Integer> reduceShards,
      AppEngineServiceUtils appEngineServiceUtils) {
    this.httpParamMapShards = mapShards;
    this.httpParamReduceShards = reduceShards;
    this.appEngineServiceUtils = appEngineServiceUtils;
  }

  /** Set the max time to run a slice before serializing; defaults to 3 minutes. */
  public MapreduceRunner setSliceDuration(Duration sliceDuration) {
    this.sliceDuration = checkArgumentNotNull(sliceDuration, "sliceDuration");
    return this;
  }

  /** Set the human readable job name for display purposes. */
  public MapreduceRunner setJobName(String jobName) {
    this.jobName = checkArgumentNotNull(jobName, "jobName");
    return this;
  }

  /** Set the module to run in. */
  public MapreduceRunner setModuleName(String moduleName) {
    this.moduleName = checkArgumentNotNull(moduleName, "moduleName");
    return this;
  }

  /** Set the default number of mappers, if not overridden by the http param. */
  public MapreduceRunner setDefaultMapShards(int defaultMapShards) {
    this.defaultMapShards = defaultMapShards;
    return this;
  }

  /** Set the default number of reducers, if not overridden by the http param. */
  public MapreduceRunner setDefaultReduceShards(int defaultReduceShards) {
    this.defaultReduceShards = defaultReduceShards;
    return this;
  }

  /**
   * Create a map-only mapreduce to be run as part of a pipeline.
   *
   * @see #runMapOnly for creating and running an independent map-only mapreduce
   *
   * @param mapper instance of a mapper class
   * @param inputs input sources for the mapper
   * @param <I> mapper input type
   * @param <O> individual output record type sent to the {@link Output}
   * @param <R> overall output result type
   */
  public <I, O, R> MapJob<I, O, R> createMapOnlyJob(
      Mapper<I, Void, O> mapper,
      Output<O, R> output,
      Iterable<? extends Input<? extends I>> inputs) {
    checkCommonRequiredFields(inputs, mapper);
    return new MapJob<>(
        new MapSpecification.Builder<I, O, R>()
            .setJobName(jobName)
            // HTTP param overrides the default shard count when present.
            .setInput(new ConcatenatingInput<>(inputs, httpParamMapShards.orElse(defaultMapShards)))
            .setMapper(mapper)
            .setOutput(output)
            .build(),
        new MapSettings.Builder()
            .setWorkerQueueName(QUEUE_NAME)
            .setBaseUrl(BASE_URL)
            .setModule(moduleName)
            .setMillisPerSlice((int) sliceDuration.getMillis())
            .build());
  }

  /**
   * Kick off a map-only mapreduce.
   *
   * <p>For simplicity, the mapreduce is hard-coded with {@link NoOutput}, on the assumption that
   * all work will be accomplished via side effects during the map phase.
   *
   * @see #createMapOnlyJob for creating and running a map-only mapreduce as part of a pipeline
   * @param mapper instance of a mapper class
   * @param inputs input sources for the mapper
   * @param <I> mapper input type
   * @return the job id
   */
  public <I> MapreduceRunnerResult runMapOnly(
      Mapper<I, Void, Void> mapper, Iterable<? extends Input<? extends I>> inputs) {
    return runAsPipeline(createMapOnlyJob(mapper, new NoOutput<Void, Void>(), inputs));
  }

  /**
   * Create a mapreduce job to be run as part of a pipeline.
   *
   * @see #runMapreduce for creating and running an independent mapreduce
   *
   * @param mapper instance of a mapper class
   * @param reducer instance of a reducer class
   * @param inputs input sources for the mapper
   * @param <I> mapper input type
   * @param <K> emitted key type
   * @param <V> emitted value type
   * @param <O> individual output record type sent to the {@link Output}
   * @param <R> overall output result type
   */
  public final <I, K extends Serializable, V extends Serializable, O, R> MapReduceJob<I, K, V, O, R>
      createMapreduceJob(
          Mapper<I, K, V> mapper,
          Reducer<K, V, O> reducer,
          Iterable<? extends Input<? extends I>> inputs,
          Output<O, R> output) {
    checkCommonRequiredFields(inputs, mapper);
    checkArgumentNotNull(reducer, "reducer");
    return new MapReduceJob<>(
        new MapReduceSpecification.Builder<I, K, V, O, R>()
            .setJobName(jobName)
            .setInput(new ConcatenatingInput<>(inputs, httpParamMapShards.orElse(defaultMapShards)))
            .setMapper(mapper)
            .setReducer(reducer)
            .setOutput(output)
            // Serialization marshallers are hardcoded here; hence the Serializable bounds on K/V.
            .setKeyMarshaller(Marshallers.getSerializationMarshaller())
            .setValueMarshaller(Marshallers.getSerializationMarshaller())
            .setNumReducers(httpParamReduceShards.orElse(defaultReduceShards))
            .build(),
        new MapReduceSettings.Builder()
            .setWorkerQueueName(QUEUE_NAME)
            .setBaseUrl(BASE_URL)
            .setModule(moduleName)
            .setMillisPerSlice((int) sliceDuration.getMillis())
            .build());
  }

  /**
   * Kick off a mapreduce job.
   *
   * <p>For simplicity, the mapreduce is hard-coded with {@link NoOutput}, on the assumption that
   * all work will be accomplished via side effects during the map or reduce phases.
   *
   * @see #createMapreduceJob for creating and running a mapreduce as part of a pipeline
   * @param mapper instance of a mapper class
   * @param reducer instance of a reducer class
   * @param inputs input sources for the mapper
   * @param <I> mapper input type
   * @param <K> emitted key type
   * @param <V> emitted value type
   * @return the job id
   */
  public final <I, K extends Serializable, V extends Serializable>
      MapreduceRunnerResult runMapreduce(
          Mapper<I, K, V> mapper,
          Reducer<K, V, Void> reducer,
          Iterable<? extends Input<? extends I>> inputs) {
    return runMapreduce(mapper, reducer, inputs, new NoOutput<Void, Void>());
  }

  /**
   * Kick off a mapreduce job with specified Output handler.
   *
   * @see #createMapreduceJob for creating and running a mapreduce as part of a pipeline
   * @param mapper instance of a mapper class
   * @param reducer instance of a reducer class
   * @param inputs input sources for the mapper
   * @param <I> mapper input type
   * @param <K> emitted key type
   * @param <V> emitted value type
   * @param <O> emitted output type
   * @param <R> return value of output
   * @return the job id
   */
  public final <I, K extends Serializable, V extends Serializable, O, R>
      MapreduceRunnerResult runMapreduce(
          Mapper<I, K, V> mapper,
          Reducer<K, V, O> reducer,
          Iterable<? extends Input<? extends I>> inputs,
          Output<O, R> output) {
    return runAsPipeline(createMapreduceJob(mapper, reducer, inputs, output));
  }

  /** Validates that the required builder settings and arguments are present and non-null. */
  private void checkCommonRequiredFields(Iterable<?> inputs, Mapper<?, ?, ?> mapper) {
    checkNotNull(jobName, "jobName");
    checkNotNull(moduleName, "moduleName");
    checkArgumentNotNull(inputs, "inputs");
    checkArgumentNotNull(mapper, "mapper");
  }

  /** Starts the given job on the pipeline service and logs a link to the mapreduce console. */
  private MapreduceRunnerResult runAsPipeline(Job0<?> job) {
    String jobId =
        newPipelineService()
            .startNewPipeline(
                job, new JobSetting.OnModule(moduleName), new JobSetting.OnQueue(QUEUE_NAME));
    logger.atInfo().log(
        "Started '%s' %s job: %s",
        jobName, job instanceof MapJob ? "map" : "mapreduce", renderMapreduceConsoleLink(jobId));
    return new MapreduceRunnerResult(jobId);
  }

  /** Formats the status-console URL for the given pipeline job id. */
  private String renderMapreduceConsoleLink(String jobId) {
    return String.format(
        MAPREDUCE_CONSOLE_LINK_FORMAT,
        appEngineServiceUtils.convertToSingleSubdomain(
            appEngineServiceUtils.getServiceHostname("backend")),
        jobId);
  }

  /**
   * Class representing the result of kicking off a mapreduce.
   *
   * <p>This is used to send a link to the mapreduce console.
   */
  public class MapreduceRunnerResult {

    private final String jobId;

    private MapreduceRunnerResult(String jobId) {
      this.jobId = jobId;
    }

    /** Writes the console link (plus a trailing newline) as the HTTP response payload. */
    public void sendLinkToMapreduceConsole(Response response) {
      response.setPayload(getLinkToMapreduceConsole() + "\n");
    }

    /** Returns the human-readable console link for this job. */
    public String getLinkToMapreduceConsole() {
      return renderMapreduceConsoleLink(jobId);
    }
  }
}

View file

@ -1,65 +0,0 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce;
import static com.google.common.collect.ImmutableList.toImmutableList;
import com.google.appengine.tools.mapreduce.Output;
import com.google.appengine.tools.mapreduce.OutputWriter;
import com.google.common.flogger.FluentLogger;
import google.registry.model.server.Lock;
import java.util.Collection;
import java.util.List;
import java.util.stream.Stream;
/** A no-op MapReduce {@link Output} whose only effect is to release a {@link Lock} on completion. */
public class UnlockerOutput<O> extends Output<O, Lock> {
  private static final long serialVersionUID = 2884979908715512998L;
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  // The lock to release once the mapreduce finishes.
  private final Lock lock;

  public UnlockerOutput(Lock lock) {
    this.lock = lock;
  }

  @Override
  public List<NoopWriter<O>> createWriters(int numShards) {
    // One throwaway writer per shard; none of them ever records anything.
    return Stream.generate(() -> new NoopWriter<O>()).limit(numShards).collect(toImmutableList());
  }

  @Override
  public Lock finish(Collection<? extends OutputWriter<O>> unusedWriters) {
    logger.atInfo().log("Mapreduce finished; releasing lock '%s'.", lock);
    lock.release();
    return lock;
  }

  /** Writer that silently discards everything handed to it. */
  private static class NoopWriter<O> extends OutputWriter<O> {
    private static final long serialVersionUID = -8327197554987150393L;

    @Override
    public void write(O value) {
      // Intentionally a no-op.
    }

    @Override
    public boolean allowSliceRetry() {
      // Always safe to retry a slice, since nothing is ever written.
      return true;
    }
  }
}

View file

@ -1,54 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.util.TypeUtils.checkNoInheritanceRelationships;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.index.EppResourceIndexBucket;
/**
 * A MapReduce {@link Input} that yields all child objects of the given child types whose parents
 * are {@link EppResource}s of the given resource types.
 */
@DeleteAfterMigration
class ChildEntityInput<R extends EppResource, I extends ImmutableObject>
    extends EppResourceBaseInput<I> {
  private static final long serialVersionUID = -3888034213150865008L;

  // Parent EPP resource types and the child entity types to load under them.
  private final ImmutableSet<Class<? extends R>> resourceClasses;
  private final ImmutableSet<Class<? extends I>> childResourceClasses;

  public ChildEntityInput(
      ImmutableSet<Class<? extends R>> resourceClasses,
      ImmutableSet<Class<? extends I>> childResourceClasses) {
    // Validate before assigning: neither type set may contain classes related by inheritance.
    checkNoInheritanceRelationships(ImmutableSet.copyOf(resourceClasses));
    checkNoInheritanceRelationships(ImmutableSet.copyOf(childResourceClasses));
    this.resourceClasses = resourceClasses;
    this.childResourceClasses = childResourceClasses;
  }

  @Override
  protected InputReader<I> bucketToReader(Key<EppResourceIndexBucket> bucketKey) {
    return new ChildEntityReader<>(bucketKey, resourceClasses, childResourceClasses);
  }
}

View file

@ -1,255 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.model.EntityClasses.ALL_CLASSES;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.Cursor;
import com.google.appengine.api.datastore.QueryResultIterator;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.appengine.tools.mapreduce.ShardContext;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import java.io.IOException;
import java.util.NoSuchElementException;
import javax.annotation.Nullable;
/**
 * Reader that maps over {@link EppResourceIndex} and returns resources that are children of {@link
 * EppResource} objects.
 */
@DeleteAfterMigration
class ChildEntityReader<R extends EppResource, I extends ImmutableObject> extends InputReader<I> {

  private static final long serialVersionUID = 7481761146349663848L;

  /** This reader uses an EppResourceEntityReader under the covers to iterate over EPP resources. */
  private final EppResourceEntityReader<? extends R> eppResourceEntityReader;

  /** The child resource classes to postfilter for. */
  private final ImmutableList<Class<? extends I>> childResourceClasses;

  /** The index within the list above for the next ofy query. */
  private int childResourceClassIndex;

  /** A reader used to go over children of the current eppResourceEntity and childResourceClass. */
  @Nullable private ChildReader<? extends I> childReader;

  public ChildEntityReader(
      Key<EppResourceIndexBucket> bucketKey,
      ImmutableSet<Class<? extends R>> resourceClasses,
      ImmutableSet<Class<? extends I>> childResourceClasses) {
    this.childResourceClasses = expandPolymorphicClasses(childResourceClasses);
    this.eppResourceEntityReader = new EppResourceEntityReader<>(bucketKey, resourceClasses);
  }

  /** Expands non-entity polymorphic classes into their child types. */
  @SuppressWarnings("unchecked")
  private ImmutableList<Class<? extends I>> expandPolymorphicClasses(
      ImmutableSet<Class<? extends I>> resourceClasses) {
    ImmutableList.Builder<Class<? extends I>> builder = new ImmutableList.Builder<>();
    for (Class<? extends I> clazz : resourceClasses) {
      if (clazz.isAnnotationPresent(Entity.class)) {
        // Concrete @Entity classes pass through unchanged.
        builder.add(clazz);
      } else {
        // Abstract/polymorphic classes expand to every registered entity class assignable to them.
        for (Class<? extends ImmutableObject> entityClass : ALL_CLASSES) {
          if (clazz.isAssignableFrom(entityClass)) {
            builder.add((Class<? extends I>) entityClass);
          }
        }
      }
    }
    return builder.build();
  }

  /**
   * Get the next {@link ImmutableObject} (i.e. child element) from the query.
   *
   * @throws NoSuchElementException if there are no more EPP resources to iterate over.
   */
  I nextChild() throws NoSuchElementException {
    // This code implements a single iteration over a triple-nested loop. It returns the next
    // innermost item of that 3-nested loop. The entire loop would look like this:
    //
    // NOTE: I'm treating eppResourceEntityReader and childReader as if they were iterables for
    // brevity, although they aren't - they are Readers
    //
    // I'm also using the python 'yield' command to show we're returning this item one by one.
    //
    // for (eppResourceEntity : eppResourceEntityReader) {
    //   for (childResourceClass : childResourceClasses) {
    //     for (I child : ChildReader.create(childResourceClass, Key.create(eppResourceEntity)) {
    //       yield child; // returns the 'child's one by one.
    //     }
    //   }
    // }
    // First, set all the variables if they aren't set yet. This should only happen on the first
    // time in the function.
    //
    // This can be merged with the calls in the "catch" below to avoid code duplication, but it
    // makes the code harder to read.
    if (childReader == null) {
      childResourceClassIndex = 0;
      childReader =
          ChildReader.create(
              childResourceClasses.get(childResourceClassIndex),
              Key.create(eppResourceEntityReader.next()));
    }
    // Then continue advancing the 3-nested loop until we find a value
    while (true) {
      try {
        // Advance the inner loop and return the next value.
        return childReader.next();
      } catch (NoSuchElementException e) {
        // If we got here it means the inner loop (childQueryIterator) is done - we need to advance
        // the middle loop by one, and then reset the inner loop.
        childResourceClassIndex++;
        // Check if the middle loop is done as well
        if (childResourceClassIndex < childResourceClasses.size()) {
          // The middle loop is not done. Reset the inner loop.
          childReader = childReader.withType(childResourceClasses.get(childResourceClassIndex));
        } else {
          // We're done with the middle loop as well! Advance the outer loop, and reset the middle
          // loop and inner loops. Note that eppResourceEntityReader.next() throws
          // NoSuchElementException when the outer loop is exhausted, which terminates the whole
          // iteration.
          childResourceClassIndex = 0;
          childReader =
              ChildReader.create(
                  childResourceClasses.get(childResourceClassIndex),
                  Key.create(eppResourceEntityReader.next()));
        }
        // Loop back up the while, to try reading a value again
      }
    }
  }

  @Override
  public I next() throws NoSuchElementException {
    // Loop until a child of one of the requested (postfiltered) types is found; exhaustion
    // surfaces as the NoSuchElementException thrown out of nextChild().
    while (true) {
      I entity = nextChild();
      if (entity != null) {
        // Postfilter to distinguish polymorphic types.
        for (Class<? extends I> resourceClass : childResourceClasses) {
          if (resourceClass.isInstance(entity)) {
            return entity;
          }
        }
      }
    }
  }

  @Override
  public void beginSlice() {
    eppResourceEntityReader.beginSlice();
    if (childReader != null) {
      childReader.beginSlice();
    }
  }

  @Override
  public void endSlice() {
    eppResourceEntityReader.endSlice();
    if (childReader != null) {
      childReader.endSlice();
    }
  }

  // The remaining overrides simply delegate lifecycle and progress reporting to the underlying
  // EppResourceEntityReader.

  @Override
  public Double getProgress() {
    return eppResourceEntityReader.getProgress();
  }

  @Override
  public long estimateMemoryRequirement() {
    return eppResourceEntityReader.estimateMemoryRequirement();
  }

  @Override
  public ShardContext getContext() {
    return eppResourceEntityReader.getContext();
  }

  @Override
  public void setContext(ShardContext context) {
    eppResourceEntityReader.setContext(context);
  }

  @Override
  public void beginShard() {
    eppResourceEntityReader.beginShard();
  }

  @Override
  public void endShard() throws IOException {
    eppResourceEntityReader.endShard();
  }

  /** Reader over all children of a single type under a single ancestor key. */
  private static class ChildReader<I> extends RetryingInputReader<I, I> {

    private static final long serialVersionUID = -8443132445119657998L;

    private final Class<I> type;
    private final Key<?> ancestor;

    /** Create a reader that goes over all the children of a given type to the given ancestor. */
    public ChildReader(Class<I> type, Key<?> ancestor) {
      this.type = type;
      this.ancestor = ancestor;
      // This reader isn't initialized by mapreduce, so we need to initialize it ourselves
      beginShard();
      beginSlice();
    }

    /**
     * Create a reader that goes over all the children of a given type to the given ancestor.
     *
     * <p>We need this function in addition to the constructor so that we can create a ChildReader<?
     * extends I>.
     */
    public static <I> ChildReader<I> create(Class<I> type, Key<?> ancestor) {
      return new ChildReader<I>(type, ancestor);
    }

    /** Query for children of the current resource and of the current child class. */
    @Override
    public QueryResultIterator<I> getQueryIterator(Cursor cursor) {
      return startQueryAt(auditedOfy().load().type(type).ancestor(ancestor), cursor).iterator();
    }

    @Override
    public int getTotal() {
      // Total count is unknown for ancestor queries; 0 is the sentinel for "unknown" here.
      return 0;
    }

    @Override
    public I next() {
      return nextQueryResult();
    }

    /** Returns a new ChildReader of the same ancestor for the given type. */
    public <J> ChildReader<J> withType(Class<J> type) {
      return create(type, ancestor);
    }
  }
}

View file

@ -1,112 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.util.List;
import java.util.NoSuchElementException;
/** A MapReduce {@link Input} adapter that chunks an input of keys into sublists of keys. */
public class ChunkingKeyInput extends Input<List<Key>> {

  private static final long serialVersionUID = 1670202385246824694L;

  /** The underlying input of individual keys. */
  private final Input<Key> input;

  /** Maximum number of keys per emitted sublist. */
  private final int chunkSize;

  /**
   * @param input the input of single keys to adapt
   * @param chunkSize maximum number of keys per chunk; the final chunk may be smaller
   */
  public ChunkingKeyInput(Input<Key> input, int chunkSize) {
    this.input = input;
    this.chunkSize = chunkSize;
  }

  /**
   * An input reader that wraps around another input reader and returns its contents in chunks of
   * a given size.
   */
  private static class ChunkingKeyInputReader extends InputReader<List<Key>> {

    private static final long serialVersionUID = 53502324675703263L;

    private final InputReader<Key> reader;
    private final int chunkSize;

    ChunkingKeyInputReader(InputReader<Key> reader, int chunkSize) {
      this.reader = reader;
      this.chunkSize = chunkSize;
    }

    /**
     * Returns the next chunk of up to {@code chunkSize} keys.
     *
     * @throws java.util.NoSuchElementException if the underlying reader is already exhausted
     */
    @Override
    public List<Key> next() throws IOException {
      ImmutableList.Builder<Key> chunk = new ImmutableList.Builder<>();
      try {
        for (int i = 0; i < chunkSize; i++) {
          chunk.add(reader.next());
        }
      } catch (NoSuchElementException e) {
        // Amazingly this is the recommended (and only) way to test for hasNext().
      }
      ImmutableList<Key> builtChunk = chunk.build();
      if (builtChunk.isEmpty()) {
        throw new NoSuchElementException(); // Maintain the contract.
      }
      return builtChunk;
    }

    @Override
    public Double getProgress() {
      return reader.getProgress();
    }

    @Override
    public void beginShard() throws IOException {
      reader.beginShard();
    }

    @Override
    public void beginSlice() throws IOException {
      reader.beginSlice();
    }

    @Override
    public void endSlice() throws IOException {
      reader.endSlice();
    }

    @Override
    public void endShard() throws IOException {
      reader.endShard();
    }

    @Override
    public long estimateMemoryRequirement() {
      // The reader's memory requirement plus the memory for this chunk's worth of buffered keys.
      // 256 comes from DatastoreKeyInputReader.AVERAGE_KEY_SIZE. Multiply in long arithmetic so
      // that a very large chunkSize cannot overflow the int intermediate before widening.
      return reader.estimateMemoryRequirement() + chunkSize * 256L;
    }
  }

  /** Wraps each of the underlying input's readers in a chunking adapter. */
  @Override
  public List<InputReader<List<Key>>> createReaders() throws IOException {
    ImmutableList.Builder<InputReader<List<Key>>> readers = new ImmutableList.Builder<>();
    for (InputReader<Key> reader : input.createReaders()) {
      readers.add(new ChunkingKeyInputReader(reader, chunkSize));
    }
    return readers.build();
  }
}

View file

@ -1,64 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableList;
import com.googlecode.objectify.Key;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogManifest;
import java.util.List;
import javax.annotation.Nullable;
import org.joda.time.DateTime;
/** An {@link Input} that maps over {@link CommitLogManifest} keys, one reader per bucket. */
@DeleteAfterMigration
public class CommitLogManifestInput extends Input<Key<CommitLogManifest>> {

  private static final long serialVersionUID = 6744322799131602384L;

  /**
   * Exclusive upper bound on manifest dates, or {@code null} for no bound.
   *
   * <p>Held as a nullable field rather than {@code Optional<DateTime>} because Optional is
   * deliberately not Serializable.
   */
  @Nullable private final DateTime olderThan;

  public CommitLogManifestInput() {
    this(null);
  }

  public CommitLogManifestInput(@Nullable DateTime olderThan) {
    this.olderThan = olderThan;
  }

  /** Creates one {@link CommitLogManifestReader} for each commit log bucket. */
  @Override
  public List<InputReader<Key<CommitLogManifest>>> createReaders() {
    ImmutableList.Builder<InputReader<Key<CommitLogManifest>>> builder = ImmutableList.builder();
    for (Key<CommitLogBucket> bucketKey : CommitLogBucket.getAllBucketKeys()) {
      builder.add(new CommitLogManifestReader(bucketKey, olderThan));
    }
    return builder.build();
  }
}

View file

@ -1,98 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.Cursor;
import com.google.appengine.api.datastore.QueryResultIterator;
import com.google.appengine.tools.mapreduce.InputReader;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.cmd.Query;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogManifest;
import java.util.NoSuchElementException;
import javax.annotation.Nullable;
import org.joda.time.DateTime;
/**
 * {@link InputReader} that maps over the {@link CommitLogManifest} keys of a single
 * {@link CommitLogBucket}, optionally restricted to manifests older than a cutoff.
 */
@DeleteAfterMigration
class CommitLogManifestReader
extends RetryingInputReader<Key<CommitLogManifest>, Key<CommitLogManifest>> {
/**
 * Memory estimation for this reader.
 *
 * <p>Elements are relatively small (parent key, Id, and a set of deleted keys), so this should
 * be more than enough.
 */
private static final long MEMORY_ESTIMATE = 100 * 1024;
private static final long serialVersionUID = 6215490573108252100L;
/** Key of the commit log bucket whose manifests this reader scans. */
private final Key<CommitLogBucket> bucketKey;
/**
 * Cutoff date for result.
 *
 * <p>If present, all resulting CommitLogManifest will be dated prior to this date.
 */
@Nullable
private final DateTime olderThan;
CommitLogManifestReader(Key<CommitLogBucket> bucketKey, @Nullable DateTime olderThan) {
this.bucketKey = bucketKey;
this.olderThan = olderThan;
}
/** Starts (or, if {@code cursor} is non-null, resumes) a keys-only query over the bucket. */
@Override
public QueryResultIterator<Key<CommitLogManifest>> getQueryIterator(@Nullable Cursor cursor) {
return startQueryAt(createBucketQuery(), cursor).keys().iterator();
}
/** Returns the total manifest count for this bucket; note that this runs a count query. */
@Override
public int getTotal() {
return createBucketQuery().count();
}
/** Query for children of this bucket. */
Query<CommitLogManifest> createBucketQuery() {
Query<CommitLogManifest> query =
auditedOfy().load().type(CommitLogManifest.class).ancestor(bucketKey);
if (olderThan != null) {
// The cutoff key's id is olderThan.getMillis(), so this key inequality presumably relies on
// manifest ids being commit timestamps in millis — confirm against CommitLogManifest.
query = query.filterKey(
"<",
Key.create(bucketKey, CommitLogManifest.class, olderThan.getMillis()));
}
return query;
}
/** Returns the estimated memory that will be used by this reader in bytes. */
@Override
public long estimateMemoryRequirement() {
return MEMORY_ESTIMATE;
}
/**
 * Get the key of the next {@link CommitLogManifest} from the query.
 *
 * @throws NoSuchElementException if there are no more elements.
 */
@Override
public Key<CommitLogManifest> next() {
return nextQueryResult();
}
}

View file

@ -1,65 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.appengine.tools.mapreduce.inputs.ConcatenatingInputReader;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ListMultimap;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Set;
/**
 * A MapReduce {@link Input} that concatenates several inputs, distributing their readers over a
 * fixed number of shards.
 *
 * @param <T> input type
 */
public class ConcatenatingInput<T> extends Input<T> {

  private static final long serialVersionUID = 1225981408139437077L;

  private final Set<? extends Input<? extends T>> inputs;
  private final int numShards;

  public ConcatenatingInput(Iterable<? extends Input<? extends T>> inputs, int numShards) {
    this.inputs = ImmutableSet.copyOf(inputs);
    this.numShards = numShards;
  }

  /**
   * Deals the underlying readers round-robin into {@code numShards} groups, then wraps each group
   * in one {@link ConcatenatingInputReader} so the mapreduce sees exactly that many readers.
   */
  @Override
  public List<InputReader<T>> createReaders() throws IOException {
    ListMultimap<Integer, InputReader<T>> readersByShard = ArrayListMultimap.create();
    int dealt = 0;
    for (Input<? extends T> input : inputs) {
      for (InputReader<? extends T> reader : input.createReaders()) {
        // Covariant cast is safe because an InputReader<I> only outputs I and never consumes it.
        @SuppressWarnings("unchecked")
        InputReader<T> typedReader = (InputReader<T>) reader;
        readersByShard.put(dealt % numShards, typedReader);
        dealt++;
      }
    }
    ImmutableList.Builder<InputReader<T>> result = ImmutableList.builder();
    for (Collection<InputReader<T>> shard : readersByShard.asMap().values()) {
      result.add(new ConcatenatingInputReader<>(ImmutableList.copyOf(shard)));
    }
    return result.build();
  }
}

View file

@ -1,42 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableList;
import com.googlecode.objectify.Key;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import java.util.List;
/** Base class for {@link Input} classes that map over {@link EppResourceIndex}. */
abstract class EppResourceBaseInput<I> extends Input<I> {
private static final long serialVersionUID = -6681886718929462122L;
/** Creates one reader per {@link EppResourceIndexBucket}; the shard count equals bucket count. */
@Override
public List<InputReader<I>> createReaders() {
ImmutableList.Builder<InputReader<I>> readers = new ImmutableList.Builder<>();
for (Key<EppResourceIndexBucket> bucketKey : EppResourceIndexBucket.getAllBuckets()) {
readers.add(bucketToReader(bucketKey));
}
return readers.build();
}
/** Creates a reader that returns the resources under a bucket. */
protected abstract InputReader<I> bucketToReader(Key<EppResourceIndexBucket> bucketKey);
}

View file

@ -1,89 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.Cursor;
import com.google.appengine.api.datastore.QueryResultIterator;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.cmd.Query;
import google.registry.model.EppResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import javax.annotation.Nullable;
/** Base class for {@link InputReader} classes that map over {@link EppResourceIndex}. */
abstract class EppResourceBaseReader<T> extends RetryingInputReader<EppResourceIndex, T> {
/** Number of bytes in 1MB of memory, used for memory estimates. */
static final long ONE_MB = 1024 * 1024;
private static final long serialVersionUID = 7942584269402339168L;
/**
 * The resource kinds to filter for.
 *
 * <p>This can be empty, or any of {"ContactResource", "HostResource", "DomainBase"}. It will
 * never contain "EppResource" since this isn't an actual kind in Datastore.
 */
private final ImmutableSet<String> filterKinds;
/** Key of the index bucket this reader scans. */
private final Key<EppResourceIndexBucket> bucketKey;
/** Memory footprint, in bytes, reported to the mapreduce framework for this reader. */
private final long memoryEstimate;
EppResourceBaseReader(
Key<EppResourceIndexBucket> bucketKey,
long memoryEstimate,
ImmutableSet<String> filterKinds) {
this.bucketKey = bucketKey;
this.memoryEstimate = memoryEstimate;
this.filterKinds = filterKinds;
}
/** Starts (or, if {@code cursor} is non-null, resumes) iteration over the bucket's entries. */
@Override
public QueryResultIterator<EppResourceIndex> getQueryIterator(@Nullable Cursor cursor) {
return startQueryAt(query(), cursor).iterator();
}
/** Returns the number of matching index entries; note that this runs a count query. */
@Override
public int getTotal() {
return query().count();
}
/** Query for children of this bucket. */
Query<EppResourceIndex> query() {
Query<EppResourceIndex> query =
auditedOfy().load().type(EppResourceIndex.class).ancestor(bucketKey);
// An empty filterKinds set means "no filtering": every index entry in the bucket is returned.
return filterKinds.isEmpty() ? query : query.filter("kind in", filterKinds);
}
/** Returns the estimated memory that will be used by this reader in bytes. */
@Override
public long estimateMemoryRequirement() {
return memoryEstimate;
}
/** Maps resource classes to their Datastore kind names for use as a query filter. */
static <R extends EppResource> ImmutableSet<String> varargsToKinds(
ImmutableSet<Class<? extends R>> resourceClasses) {
// Ignore EppResource when finding kinds, since it doesn't have one and doesn't imply filtering.
return resourceClasses.contains(EppResource.class)
? ImmutableSet.of()
: resourceClasses.stream().map(Key::getKind).collect(toImmutableSet());
}
}

View file

@ -1,42 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.util.TypeUtils.checkNoInheritanceRelationships;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.index.EppResourceIndexBucket;
/** A MapReduce {@link Input} that loads all {@link EppResource} objects of a given type. */
class EppResourceEntityInput<R extends EppResource> extends EppResourceBaseInput<R> {
private static final long serialVersionUID = 8162607479124406226L;
/** Resource classes to load; the constructor verifies none is a supertype of another. */
private final ImmutableSet<Class<? extends R>> resourceClasses;
public EppResourceEntityInput(ImmutableSet<Class<? extends R>> resourceClasses) {
this.resourceClasses = resourceClasses;
// NOTE(review): copyOf here appears to exist only to adapt the set's generic type for the
// check (Guava's copyOf of an ImmutableSet returns the same instance) — confirm before
// simplifying.
checkNoInheritanceRelationships(ImmutableSet.copyOf(resourceClasses));
}
/** Returns a reader that loads and postfilters the resources indexed in the given bucket. */
@Override
protected InputReader<R> bucketToReader(Key<EppResourceIndexBucket> bucketKey) {
return new EppResourceEntityReader<>(bucketKey, resourceClasses);
}
}

View file

@ -1,80 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import java.util.NoSuchElementException;
/** Reader that maps over {@link EppResourceIndex} and returns resources. */
class EppResourceEntityReader<R extends EppResource> extends EppResourceBaseReader<R> {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final long serialVersionUID = -8042933349899971801L;
/**
 * The resource classes to postfilter for.
 *
 * <p>This can be {@link EppResource} or any descendant classes, regardless of whether those
 * classes map directly to a kind in Datastore, with the restriction that none of the classes
 * is a supertype of any of the others.
 */
private final ImmutableSet<Class<? extends R>> resourceClasses;
public EppResourceEntityReader(
Key<EppResourceIndexBucket> bucketKey,
ImmutableSet<Class<? extends R>> resourceClasses) {
super(
bucketKey,
ONE_MB * 2, // Estimate 2MB of memory for this reader, since it loads a (max 1MB) entity.
varargsToKinds(resourceClasses));
this.resourceClasses = resourceClasses;
}
/**
 * Called for each map invocation.
 *
 * <p>Skips (with a severe log) index entries whose referenced resource no longer exists, and
 * silently skips loaded resources whose concrete type matches none of the requested classes.
 *
 * @throws NoSuchElementException if there are no more elements, as specified in the
 *     {@link InputReader#next} Javadoc.
 */
@Override
public R next() throws NoSuchElementException {
// Loop until we find a value, or nextQueryResult() throws a NoSuchElementException.
while (true) {
Key<? extends EppResource> key = nextQueryResult().getKey();
EppResource resource = auditedOfy().load().key(key).now();
if (resource == null) {
logger.atSevere().log("EppResourceIndex key %s points at a missing resource.", key);
continue;
}
// Postfilter to distinguish polymorphic types (e.g. EppResources).
for (Class<? extends R> resourceClass : resourceClasses) {
if (resourceClass.isAssignableFrom(resource.getClass())) {
// Safe: the isAssignableFrom check just proved resource is an instance of resourceClass.
@SuppressWarnings("unchecked")
R r = (R) resource;
return r;
}
}
}
}
}

View file

@ -1,34 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.googlecode.objectify.Key;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
/** A MapReduce {@link Input} that loads all {@link EppResourceIndex} entities. */
class EppResourceIndexInput extends EppResourceBaseInput<EppResourceIndex> {
private static final long serialVersionUID = -1231269296567279059L;
/** Returns a reader that yields the raw index entries stored in the given bucket. */
@Override
protected InputReader<EppResourceIndex> bucketToReader(Key<EppResourceIndexBucket> bucketKey) {
return new EppResourceIndexReader(bucketKey);
}
}

View file

@ -1,45 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import java.util.NoSuchElementException;
/** Reader that maps over {@link EppResourceIndex} and returns the index objects themselves. */
class EppResourceIndexReader extends EppResourceBaseReader<EppResourceIndex> {
private static final long serialVersionUID = -4816383426796766911L;
public EppResourceIndexReader(Key<EppResourceIndexBucket> bucketKey) {
// Estimate 1MB of memory for this reader, which is massive overkill.
// Use an empty set for the filter kinds, which disables filtering.
super(bucketKey, ONE_MB, ImmutableSet.of());
}
/**
 * Called for each map invocation; returns the next raw index entry in the bucket.
 *
 * @throws NoSuchElementException if there are no more elements, as specified in the
 *     {@link InputReader#next} Javadoc.
 */
@Override
public EppResourceIndex next() throws NoSuchElementException {
return nextQueryResult();
}
}

View file

@ -1,91 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Lists.asList;
import com.google.appengine.tools.mapreduce.Input;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.index.EppResourceIndex;
/**
 * Mapreduce helpers for {@link EppResource} keys and objects.
 *
 * <p>The inputs provided by this class are not deletion-aware and do not project the resources
 * forward in time. That is the responsibility of mappers that use these inputs.
 */
@DeleteAfterMigration
public final class EppResourceInputs {

  /** Not instantiable: static utility methods only. */
  private EppResourceInputs() {}

  /**
   * Returns a MapReduce {@link Input} that loads all {@link EppResourceIndex} objects.
   *
   * <p>This method previously declared an unused type parameter; it has been dropped. Call sites
   * that passed an explicit type witness remain valid because the compiler ignores type arguments
   * on non-generic method invocations.
   */
  public static Input<EppResourceIndex> createIndexInput() {
    return new EppResourceIndexInput();
  }

  /**
   * Returns a MapReduce {@link Input} that loads all {@link EppResource} objects of a given type,
   * including deleted resources.
   *
   * <p>Note: Do not concatenate multiple EntityInputs together (this is inefficient as it iterates
   * through all buckets multiple times). Specify the types in a single input, or load all types by
   * specifying {@link EppResource} as the class.
   */
  @SafeVarargs
  public static <R extends EppResource> Input<R> createEntityInput(
      Class<? extends R> resourceClass,
      Class<? extends R>... moreResourceClasses) {
    return new EppResourceEntityInput<>(
        ImmutableSet.copyOf(asList(resourceClass, moreResourceClasses)));
  }

  /**
   * Returns a MapReduce {@link Input} that loads all {@link ImmutableObject} objects of a given
   * type, including deleted resources, that are child entities of all {@link EppResource} objects
   * of a given type.
   *
   * <p>Note: Do not concatenate multiple EntityInputs together (this is inefficient as it iterates
   * through all buckets multiple times). Specify the types in a single input, or load all types by
   * specifying {@link EppResource} and/or {@link ImmutableObject} as the class.
   */
  public static <R extends EppResource, I extends ImmutableObject> Input<I> createChildEntityInput(
      ImmutableSet<Class<? extends R>> parentClasses,
      ImmutableSet<Class<? extends I>> childClasses) {
    checkArgument(!parentClasses.isEmpty(), "Must provide at least one parent type.");
    checkArgument(!childClasses.isEmpty(), "Must provide at least one child type.");
    return new ChildEntityInput<>(parentClasses, childClasses);
  }

  /**
   * Returns a MapReduce {@link Input} that loads keys to all {@link EppResource} objects of a
   * given type, including deleted resources.
   *
   * <p>Note: Do not concatenate multiple KeyInputs together (this is inefficient as it iterates
   * through all buckets multiple times). Specify the types in a single input, or load all types by
   * specifying {@link EppResource} as the class.
   */
  @SafeVarargs
  public static <R extends EppResource> Input<Key<R>> createKeyInput(
      Class<? extends R> resourceClass, Class<? extends R>... moreResourceClasses) {
    return new EppResourceKeyInput<>(
        ImmutableSet.copyOf(asList(resourceClass, moreResourceClasses)));
  }
}

View file

@ -1,48 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static google.registry.util.TypeUtils.checkNoInheritanceRelationships;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.index.EppResourceIndexBucket;
/**
 * A MapReduce {@link Input} that loads keys to all {@link EppResource} objects of a given type.
 *
 * <p>When mapping over keys we can't distinguish between Objectify polymorphic types.
 */
@DeleteAfterMigration
class EppResourceKeyInput<R extends EppResource> extends EppResourceBaseInput<Key<R>> {
private static final long serialVersionUID = -5426821384707653743L;
/** Resource classes whose keys to load; the constructor verifies none is a supertype of another. */
private final ImmutableSet<Class<? extends R>> resourceClasses;
public EppResourceKeyInput(ImmutableSet<Class<? extends R>> resourceClasses) {
this.resourceClasses = resourceClasses;
// NOTE(review): copyOf appears to exist only to adapt the set's generic type for the check —
// confirm before simplifying.
checkNoInheritanceRelationships(ImmutableSet.copyOf(resourceClasses));
}
/** Returns a reader that yields keys of the requested kinds from the given bucket. */
@Override
protected InputReader<Key<R>> bucketToReader(Key<EppResourceIndexBucket> bucketKey) {
return new EppResourceKeyReader<>(bucketKey, resourceClasses);
}
}

View file

@ -1,56 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.EppResource;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import java.util.NoSuchElementException;
/**
 * Reader that maps over {@link EppResourceIndex} and returns resource keys.
 *
 * <p>When mapping over keys we can't distinguish between Objectify polymorphic types.
 */
@DeleteAfterMigration
class EppResourceKeyReader<R extends EppResource> extends EppResourceBaseReader<Key<R>> {
private static final long serialVersionUID = -428232054739189774L;
public EppResourceKeyReader(
Key<EppResourceIndexBucket> bucketKey, ImmutableSet<Class<? extends R>> resourceClasses) {
super(
bucketKey,
ONE_MB, // Estimate 1MB of memory for this reader, which is massive overkill.
varargsToKinds(resourceClasses));
}
/**
 * Called for each map invocation; returns the key referenced by the next index entry.
 *
 * @throws NoSuchElementException if there are no more elements, as specified in the
 *     {@link InputReader#next} Javadoc.
 */
@Override
@SuppressWarnings("unchecked")
public Key<R> next() throws NoSuchElementException {
// This is a safe cast because we filtered on kind inside the query.
return (Key<R>) nextQueryResult().getKey();
}
}

View file

@ -1,53 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import com.google.appengine.tools.mapreduce.Input;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.NoSuchElementException;
/** An input that returns a single {@code null} value. */
public class NullInput<T> extends Input<T> {

  private static final long serialVersionUID = 1816836937031979851L;

  /** Reader that yields exactly one {@code null} element and is then exhausted. */
  private static final class NullReader<T> extends InputReader<T> {

    private static final long serialVersionUID = -8176201363578913125L;

    // Tracks whether the single null element has already been handed out.
    boolean read = false;

    @Override
    public T next() throws NoSuchElementException {
      if (!read) {
        read = true;
        return null;
      }
      throw new NoSuchElementException();
    }

    @Override
    public Double getProgress() {
      // Progress is all-or-nothing: 0.0 before the element is consumed, 1.0 afterwards.
      return read ? 1.0 : 0.0;
    }
  }

  @Override
  public List<? extends InputReader<T>> createReaders() {
    // One reader suffices, since this input produces exactly one (null) element.
    return ImmutableList.of(new NullReader<T>());
  }
}

View file

@ -1,168 +0,0 @@
// Copyright 2018 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.mapreduce.inputs;
import static com.google.common.base.Preconditions.checkNotNull;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.Cursor;
import com.google.appengine.api.datastore.DatastoreTimeoutException;
import com.google.appengine.api.datastore.QueryResultIterator;
import com.google.appengine.tools.mapreduce.InputReader;
import com.google.common.flogger.FluentLogger;
import com.googlecode.objectify.cmd.Query;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.util.Retrier;
import google.registry.util.SystemSleeper;
import java.util.NoSuchElementException;
import javax.annotation.Nullable;
/**
 * A reader over an Objectify query that retries reads on failure.
 *
 * <p>When doing a mapreduce over a large number of elements from Datastore, the random
 * DatastoreTimeoutExceptions that happen sometimes can eventually add up and cause the entire
 * mapreduce to fail.
 *
 * <p>This base RetryingInputReader will automatically retry any DatastoreTimeoutException to
 * minimize the failures.
 *
 * <p>{@code I} is the internal Objectify read type, while {@code T} is the InputReader return
 * type.
 */
@DeleteAfterMigration
abstract class RetryingInputReader<I, T> extends InputReader<T> {

  private static final long serialVersionUID = -4897677478541818899L;

  // Retries DatastoreTimeoutException up to 5 times, sleeping between attempts.
  private static final Retrier retrier = new Retrier(new SystemSleeper(), 5);
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  // Query resume point: saved in endSlice()/nextQueryResult(), consumed in beginSlice().
  // Null means "start the query from the beginning".
  @Nullable private Cursor cursor;
  // Cached result of getTotal(), captured once per shard; used only for progress reporting.
  private int total;
  // Number of query results handed out so far on this shard; used for progress and logging.
  private int loaded;
  // Recreated every slice in beginSlice(); QueryResultIterator is not serializable.
  private transient QueryResultIterator<I> queryIterator;

  /**
   * Return the iterator over Query results, starting at the cursor location.
   *
   * <p>Must always return an iterator over the same query.
   *
   * <p>The underlying {@link Query} must have an ancestor filter, so that it is strongly
   * consistent. According to the documentation at
   * https://cloud.google.com/appengine/docs/java/datastore/queries#Java_Data_consistency
   *
   * <p>"strongly consistent queries are always transactionally consistent". However, each time we
   * restart the query at a cursor we have a new effective query, and "if the results for a query
   * change between uses of a cursor, the query notices only changes that occur in results after
   * the cursor. If a new result appears before the cursor's position for the query, it will not be
   * returned when the results after the cursor are fetched."
   *
   * <p>What this means in practice is that entities that are created after the initial query
   * begins may or may not be seen by this reader, depending on whether the query was paused and
   * restarted with a cursor before it would have reached the new entity.
   *
   * @param cursor the initial location for the iterator to start from. If null - start from
   *     beginning.
   */
  public abstract QueryResultIterator<I> getQueryIterator(@Nullable Cursor cursor);

  /**
   * Return the total number of elements the iterator goes over.
   *
   * <p>The results are cached - this function will only be called once on the start of the shard,
   * or when the iterator is reset.
   *
   * <p>The results are only used for debugging / progress display. It is safe to return 0.
   */
  public abstract int getTotal();

  /**
   * Return the next item of this InputReader.
   *
   * <p>You probably want to use {@link #nextQueryResult} internally when preparing the next item.
   * It is OK to call {@link #nextQueryResult} multiple times.
   */
  @Override
  public abstract T next();

  /** Called once at start. Cache the expected size. */
  @Override
  public void beginShard() {
    total = getTotal();
  }

  /** Called every time we are deserialized. Create a new query or resume an existing one. */
  @Override
  public void beginSlice() {
    queryIterator = getQueryIterator(cursor);
  }

  /** Called occasionally alongside {@link #next}. */
  @Override
  public Double getProgress() {
    // Cap progress at 1.0, since the query's count() can increase during the run of the mapreduce
    // if more entities are written, but we've cached the value once in "total".
    return Math.min(1.0, ((double) loaded) / Math.max(1, total));
  }

  /** Called before we are serialized. Save a serializable cursor for this query. */
  @Override
  public void endSlice() {
    cursor = queryIterator.getCursor();
  }

  /**
   * Get the next item from the query results.
   *
   * <p>Use this to create the next() function.
   *
   * @throws NoSuchElementException if there are no more elements.
   */
  protected final I nextQueryResult() {
    // Snapshot the cursor before reading so a retry can resume the query at this exact point.
    cursor = queryIterator.getCursor();
    loaded++;
    try {
      return retrier.callWithRetry(
          () -> queryIterator.next(),
          (thrown, failures, maxAttempts) -> {
            checkNotNull(cursor, "Can't retry because cursor is null. Giving up.");
            logger.atInfo().withCause(thrown).log(
                "Retriable failure while reading item %d/%d - attempt %d/%d.",
                loaded, total, failures, maxAttempts);
            // Rebuild the iterator from the saved cursor; the old one may be in a bad state.
            queryIterator = getQueryIterator(cursor);
          },
          DatastoreTimeoutException.class);
    } catch (NoSuchElementException e) {
      // We expect NoSuchElementException to be thrown, and it isn't an error. Just rethrow.
      throw e;
    } catch (Throwable e) {
      throw new RuntimeException(
          String.format("Got an unrecoverable failure while reading item %d/%d.", loaded, total),
          e);
    } finally {
      // Free memory between items; the session cache would otherwise grow for the whole shard.
      auditedOfy().clearSessionCache();
    }
  }

  /**
   * Utility function to start a query from a given nullable cursor.
   *
   * @param query the query to work on
   * @param cursor the location to start from. If null - starts from the beginning.
   */
  public static <T> Query<T> startQueryAt(Query<T> query, @Nullable Cursor cursor) {
    return (cursor == null) ? query : query.startAt(cursor);
  }
}

View file

@ -30,11 +30,6 @@ import google.registry.model.host.HostResource;
import google.registry.model.index.EppResourceIndex;
import google.registry.model.index.EppResourceIndexBucket;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.model.ofy.CommitLogBucket;
import google.registry.model.ofy.CommitLogCheckpoint;
import google.registry.model.ofy.CommitLogCheckpointRoot;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.model.poll.PollMessage;
import google.registry.model.rde.RdeRevision;
import google.registry.model.registrar.Registrar;
@ -58,11 +53,6 @@ public final class EntityClasses {
BillingEvent.Modification.class,
BillingEvent.OneTime.class,
BillingEvent.Recurring.class,
CommitLogBucket.class,
CommitLogCheckpoint.class,
CommitLogCheckpointRoot.class,
CommitLogManifest.class,
CommitLogMutation.class,
ContactHistory.class,
ContactResource.class,
Cursor.class,

View file

@ -31,14 +31,11 @@ import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedMap;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Index;
import google.registry.config.RegistryConfig;
import google.registry.model.CacheUtils.AppEngineEnvironmentCacheLoader;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.transfer.TransferData;
import google.registry.persistence.VKey;
import google.registry.util.NonFinalForTesting;
@ -144,17 +141,6 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
// TODO(b/177567432): rename to "statuses" once we're off datastore.
Set<StatusValue> status;
/**
* Sorted map of {@link DateTime} keys (modified time) to {@link CommitLogManifest} entries.
*
* <p><b>Note:</b> Only the last revision on a given date is stored. The key is the transaction
* timestamp, not midnight.
*
* @see google.registry.model.translators.CommitLogRevisionsTranslatorFactory
*/
@Transient @DoNotCompare
ImmutableSortedMap<DateTime, Key<CommitLogManifest>> revisions = ImmutableSortedMap.of();
public String getRepoId() {
return repoId;
}
@ -206,10 +192,6 @@ public abstract class EppResource extends BackupGroupRoot implements Buildable {
return deletionTime;
}
public ImmutableSortedMap<DateTime, Key<CommitLogManifest>> getRevisions() {
return nullToEmptyImmutableCopy(revisions);
}
/** Return a clone of the resource with timed status values modified using the given time. */
public abstract EppResource cloneProjectedAtTime(DateTime now);

View file

@ -38,8 +38,6 @@ import google.registry.model.domain.DomainBase;
import google.registry.model.eppcommon.StatusValue;
import google.registry.model.host.HostResource;
import google.registry.model.index.ForeignKeyIndex;
import google.registry.model.ofy.CommitLogManifest;
import google.registry.model.ofy.CommitLogMutation;
import google.registry.model.reporting.HistoryEntry;
import google.registry.model.reporting.HistoryEntryDao;
import google.registry.model.tld.Registry;
@ -49,7 +47,6 @@ import google.registry.model.transfer.TransferStatus;
import google.registry.persistence.VKey;
import java.util.Comparator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
@ -271,32 +268,7 @@ public final class EppResourceUtils {
* perform a single fetch operation.
*
* <p><b>Warning:</b> A resource can only be rolled backwards in time, not forwards; therefore
* {@code resource} should be whatever's currently in Datastore.
*
* <p><b>Warning:</b> In Datastore, revisions are granular to 24-hour periods. It's recommended
* that {@code timestamp} be set to midnight. If you don't use midnight, you must take into
* consideration that under certain circumstances, a resource might be restored to a revision on
* the previous day, even if there were revisions made earlier on the same date as {@code
* timestamp}; however, a resource will never be restored to a revision occurring after {@code
* timestamp}. This behavior is due to the way {@link
* google.registry.model.translators.CommitLogRevisionsTranslatorFactory
* CommitLogRevisionsTranslatorFactory} manages the {@link EppResource#revisions} field. Please
* note however that the creation and deletion times of a resource are granular to the
* millisecond.
*
* <p>Example: a resource in Datastore has three revisions A, B, and C
*
* <ul>
* <li>A: Day 0, 1pm
* <li>B: Day 1, 1pm
* <li>C: Day 1, 3pm
* </ul>
*
* <p>If one requests the resource as of day 1 at 2pm, we will return revision A because as far as
* the commit logs are concerned, revision C completely overwrites the existence of revision B.
*
* <p>When using the SQL backend (post-Registry-3.0-migration) this restriction goes away and
* objects can be restored to any revision.
* {@code resource} should be whatever's currently in SQL.
*
* @return the resource at {@code timestamp} or {@code null} if resource is deleted or not yet
* created
@ -344,49 +316,6 @@ public final class EppResourceUtils {
*/
private static <T extends EppResource> T loadMostRecentRevisionAtTime(
final T resource, final DateTime timestamp) {
if (tm().isOfy()) {
return loadMostRecentRevisionAtTimeDatastore(resource, timestamp);
} else {
return loadMostRecentRevisionAtTimeSql(resource, timestamp);
}
}
/**
* Returns the most recent Datastore revision of a given EppResource before or at the provided
* timestamp using the EppResource revisions map, falling back to using the resource as-is if
* there are no revisions.
*
* @see #loadAtPointInTimeAsync(EppResource, DateTime)
*/
private static <T extends EppResource> T loadMostRecentRevisionAtTimeDatastore(
final T resource, final DateTime timestamp) {
final Key<T> resourceKey = Key.create(resource);
final Key<CommitLogManifest> revision =
findMostRecentDatastoreRevisionAtTime(resource, timestamp);
if (revision == null) {
logger.atSevere().log("No revision found for %s, falling back to resource.", resourceKey);
return resource;
}
final CommitLogMutation mutation =
auditedOfy().load().key(CommitLogMutation.createKey(revision, resourceKey)).now();
if (mutation != null) {
return auditedOfy().load().fromEntity(mutation.getEntity());
}
logger.atSevere().log(
"Couldn't load mutation for revision at %s for %s, falling back to resource."
+ " Revision: %s",
timestamp, resourceKey, revision);
return resource;
}
/**
* Returns the most recent SQL revision of a given EppResource before or at the provided timestamp
* using *History objects, falling back to using the resource as-is if there are no revisions.
*
* @see #loadAtPointInTimeAsync(EppResource, DateTime)
*/
private static <T extends EppResource> T loadMostRecentRevisionAtTimeSql(
T resource, DateTime timestamp) {
@SuppressWarnings("unchecked")
T resourceAtPointInTime =
(T)
@ -405,30 +334,6 @@ public final class EppResourceUtils {
return resourceAtPointInTime;
}
@Nullable
private static <T extends EppResource>
Key<CommitLogManifest> findMostRecentDatastoreRevisionAtTime(
final T resource, final DateTime timestamp) {
final Key<T> resourceKey = Key.create(resource);
Entry<?, Key<CommitLogManifest>> revision = resource.getRevisions().floorEntry(timestamp);
if (revision != null) {
logger.atInfo().log(
"Found revision history at %s for %s: %s", timestamp, resourceKey, revision);
return revision.getValue();
}
// Fall back to the earliest revision if we don't have one before the requested timestamp.
revision = resource.getRevisions().firstEntry();
if (revision != null) {
logger.atSevere().log(
"Found no revision history at %s for %s, using earliest revision: %s",
timestamp, resourceKey, revision);
return revision.getValue();
}
// Ultimate fallback: There are no revisions whatsoever, so return null.
logger.atSevere().log("Found no revision history at all for %s", resourceKey);
return null;
}
/**
* Returns a set of {@link VKey} for domains that reference a specified contact or host.
*

View file

@ -87,8 +87,8 @@ public enum StatusValue implements EppEnum {
* A status for a resource indicating that deletion has been requested but has not yet happened.
*
* <p>Contacts and hosts are deleted asynchronously because we need to check their incoming
* references with strong consistency, requiring a mapreduce, and during that asynchronous process
* they have the PENDING_DELETE status.
* references with strong consistency, requiring an asynchronous process, and during that
* asynchronous process they have the PENDING_DELETE status.
*
* <p>Domains in the add grace period are deleted synchronously and do not ever have this status.
* Otherwise, domains go through an extended deletion process, consisting of a 30-day redemption

View file

@ -1,169 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.DiscreteDomain.integers;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static google.registry.config.RegistryConfig.getCommitLogBucketCount;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Range;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import google.registry.config.RegistryConfig;
import google.registry.model.Buildable;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.NotBackedUp.Reason;
import google.registry.util.NonFinalForTesting;
import java.util.Random;
import java.util.function.Supplier;
import org.joda.time.DateTime;
/**
 * Root for a random commit log bucket.
 *
 * <p>This is used to shard {@link CommitLogManifest} objects into {@link
 * RegistryConfig#getCommitLogBucketCount() N} entity groups. This increases transaction
 * throughput, while maintaining the ability to perform strongly-consistent ancestor queries.
 *
 * @see <a href="https://cloud.google.com/appengine/articles/scaling/contention">Avoiding Datastore
 *     contention</a>
 */
@Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS)
@DeleteAfterMigration
public class CommitLogBucket extends ImmutableObject implements Buildable {

  /**
   * Ranges from 1 to {@link RegistryConfig#getCommitLogBucketCount()}, inclusive; starts at 1
   * since IDs can't be 0.
   */
  @Id long bucketNum;

  /** The timestamp of the last {@link CommitLogManifest} written to this bucket. */
  DateTime lastWrittenTime = START_OF_TIME;

  public int getBucketNum() {
    return (int) bucketNum;
  }

  public DateTime getLastWrittenTime() {
    return lastWrittenTime;
  }

  /**
   * Returns the key for the specified bucket ID.
   *
   * <p>Always use this method in preference to manually creating bucket keys, since manual keys
   * are not guaranteed to have a valid bucket ID number.
   */
  public static Key<CommitLogBucket> getBucketKey(int num) {
    checkArgument(getBucketIdRange().contains(num), "%s not in %s", num, getBucketIdRange());
    return getBucketKeyUnsafe(num);
  }

  private static Key<CommitLogBucket> getBucketKeyUnsafe(int num) {
    return Key.create(CommitLogBucket.class, num);
  }

  /** Returns a sorted set of all the possible numeric bucket IDs. */
  public static ImmutableSortedSet<Integer> getBucketIds() {
    return ContiguousSet.create(getBucketIdRange(), integers());
  }

  private static Range<Integer> getBucketIdRange() {
    return Range.closed(1, getCommitLogBucketCount());
  }

  /** Returns an arbitrary numeric bucket ID. Default behavior is randomly chosen IDs. */
  public static int getArbitraryBucketId() {
    return bucketIdSupplier.get();
  }

  /**
   * Supplier of valid bucket IDs to use for {@link #getArbitraryBucketId()}.
   *
   * <p>Default supplier is one that returns bucket IDs via uniform random selection, but can be
   * overridden in tests that rely on predictable bucket assignment for commit logs.
   */
  @NonFinalForTesting
  private static Supplier<Integer> bucketIdSupplier = newUniformRandomBucketIdSupplier();

  /** Returns a supplier that chooses bucket IDs uniformly at random from the valid range. */
  private static Supplier<Integer> newUniformRandomBucketIdSupplier() {
    Random random = new Random();
    return () -> random.nextInt(getCommitLogBucketCount()) + 1; // Add 1 since IDs can't be 0.
  }

  /** Returns the loaded bucket for the given key, or a new object if the bucket doesn't exist. */
  public static CommitLogBucket loadBucket(Key<CommitLogBucket> bucketKey) {
    CommitLogBucket existing = auditedOfy().load().key(bucketKey).now();
    if (existing != null) {
      return existing;
    }
    return new CommitLogBucket.Builder().setBucketNum(bucketKey.getId()).build();
  }

  /**
   * Returns the set of all loaded commit log buckets, filling in missing buckets with new ones.
   */
  public static ImmutableSet<CommitLogBucket> loadAllBuckets() {
    ImmutableSet<Key<CommitLogBucket>> allKeys = getAllBucketKeys();
    auditedOfy().load().keys(allKeys); // Load all buckets into the session cache at once.
    return allKeys.stream().map(CommitLogBucket::loadBucket).collect(toImmutableSet());
  }

  /** Returns all commit log bucket keys, in ascending order by bucket ID. */
  public static ImmutableSet<Key<CommitLogBucket>> getAllBucketKeys() {
    return getBucketIds().stream()
        .map(CommitLogBucket::getBucketKeyUnsafe)
        .collect(toImmutableSet());
  }

  @Override
  public Builder asBuilder() {
    return new Builder(clone(this));
  }

  /** A builder for {@link CommitLogBucket} since it is immutable. */
  public static class Builder extends Buildable.Builder<CommitLogBucket> {

    public Builder() {}

    public Builder(CommitLogBucket instance) {
      super(instance);
    }

    public Builder setBucketNum(long bucketNum) {
      getInstance().bucketNum = bucketNum;
      return this;
    }

    public Builder setLastWrittenTime(DateTime lastWrittenTime) {
      getInstance().lastWrittenTime = lastWrittenTime;
      return this;
    }
  }
}

View file

@ -1,111 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import static com.google.common.base.Preconditions.checkArgument;
import static org.joda.time.DateTimeZone.UTC;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Parent;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.NotBackedUp.Reason;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.joda.time.DateTime;
/**
 * Entity representing a point-in-time consistent view of Datastore, based on commit logs.
 *
 * <p>Conceptually, this entity consists of two pieces of information: the checkpoint "wall" time
 * and a set of bucket checkpoint times. The former is the ID for this checkpoint (constrained to
 * be unique upon checkpoint creation) and also represents the approximate wall time of the
 * consistent Datastore view this checkpoint represents. The latter is really a mapping from
 * bucket ID to timestamp, where the timestamp dictates the upper bound (inclusive) on commit logs
 * from that bucket to include when restoring Datastore to this checkpoint.
 */
@Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS)
@DeleteAfterMigration
public class CommitLogCheckpoint extends ImmutableObject {

  /** Shared singleton parent entity for commit log checkpoints. */
  @Parent
  Key<CommitLogCheckpointRoot> parent = CommitLogCheckpointRoot.getKey();

  /** The checkpoint's approximate "wall" time (in millis since the epoch). */
  @Id
  long checkpointTime;

  /** Bucket checkpoint times for this checkpoint, ordered to match up with buckets 1-N. */
  List<DateTime> bucketTimestamps = new ArrayList<>();

  public DateTime getCheckpointTime() {
    return new DateTime(checkpointTime, UTC);
  }

  /** Returns the bucket checkpoint times as a map from bucket ID to commit timestamp. */
  public ImmutableMap<Integer, DateTime> getBucketTimestamps() {
    ImmutableMap.Builder<Integer, DateTime> map = new ImmutableMap.Builder<>();
    int bucketId = 1; // Buckets are indexed from 1-N, so the list offset is shifted by one.
    for (DateTime timestamp : bucketTimestamps) {
      map.put(bucketId++, timestamp);
    }
    return map.build();
  }

  /**
   * Creates a CommitLogCheckpoint for the given wall time and bucket checkpoint times, specified
   * as a map from bucket ID to bucket commit timestamp.
   */
  public static CommitLogCheckpoint create(
      DateTime checkpointTime, ImmutableMap<Integer, DateTime> bucketTimestamps) {
    checkArgument(
        Objects.equals(CommitLogBucket.getBucketIds().asList(), bucketTimestamps.keySet().asList()),
        "Bucket ids are incorrect: %s",
        bucketTimestamps.keySet());
    return makeCheckpoint(checkpointTime, bucketTimestamps);
  }

  /**
   * Creates a CommitLogCheckpoint for the given wall time and bucket checkpoint times. Test only.
   *
   * <p>This lacks validation on the bucketTimestamps map.
   */
  @VisibleForTesting
  public static CommitLogCheckpoint createForTest(
      DateTime checkpointTime, ImmutableMap<Integer, DateTime> bucketTimestamps) {
    return makeCheckpoint(checkpointTime, bucketTimestamps);
  }

  /** Builds an instance without validating the timestamps; shared by both public factories. */
  private static CommitLogCheckpoint makeCheckpoint(
      DateTime checkpointTime, ImmutableMap<Integer, DateTime> bucketTimestamps) {
    CommitLogCheckpoint checkpoint = new CommitLogCheckpoint();
    checkpoint.checkpointTime = checkpointTime.getMillis();
    checkpoint.bucketTimestamps = ImmutableList.copyOf(bucketTimestamps.values());
    return checkpoint;
  }

  /** Creates a key for the CommitLogCheckpoint for the given wall time. */
  public static Key<CommitLogCheckpoint> createKey(DateTime checkpointTime) {
    return Key.create(
        CommitLogCheckpointRoot.getKey(), CommitLogCheckpoint.class, checkpointTime.getMillis());
  }
}

View file

@ -1,62 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.NotBackedUp.Reason;
import org.joda.time.DateTime;
/** Singleton parent entity for all commit log checkpoints. */
@Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS)
@DeleteAfterMigration
public class CommitLogCheckpointRoot extends ImmutableObject {

  // There is always exactly one of these entities.
  public static final long SINGLETON_ID = 1;

  @Id
  long id = SINGLETON_ID;

  /** The timestamp of the last {@link CommitLogCheckpoint} written. */
  DateTime lastWrittenTime = START_OF_TIME;

  /** Singleton key for CommitLogCheckpointParent. */
  public static Key<CommitLogCheckpointRoot> getKey() {
    return Key.create(CommitLogCheckpointRoot.class, SINGLETON_ID);
  }

  public DateTime getLastWrittenTime() {
    return lastWrittenTime;
  }

  /** Loads the singleton root, or returns a fresh (unsaved) instance if none exists yet. */
  public static CommitLogCheckpointRoot loadRoot() {
    CommitLogCheckpointRoot stored = auditedOfy().load().key(getKey()).now();
    return stored != null ? stored : new CommitLogCheckpointRoot();
  }

  public static CommitLogCheckpointRoot create(DateTime lastWrittenTime) {
    CommitLogCheckpointRoot root = new CommitLogCheckpointRoot();
    root.lastWrittenTime = lastWrittenTime;
    return root;
  }
}

View file

@ -1,89 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import static google.registry.util.CollectionUtils.nullToEmptyImmutableCopy;
import static org.joda.time.DateTimeZone.UTC;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Parent;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.NotBackedUp.Reason;
import java.util.LinkedHashSet;
import java.util.Set;
import org.joda.time.DateTime;
/**
 * Archived Datastore transaction that can be replayed.
 *
 * <p>Entities of this kind are entity group sharded using a {@link CommitLogBucket} parent. Each
 * object that was saved during this transaction is stored in a {@link CommitLogMutation} child
 * entity.
 */
@Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS)
@DeleteAfterMigration
public class CommitLogManifest extends ImmutableObject {

  /** Commit log manifests are parented on a random bucket. */
  @Parent
  Key<CommitLogBucket> parent;

  /**
   * The commit time (in millis since the epoch).
   *
   * <p>This will be unique among siblings sharing the same parent {@link CommitLogBucket}.
   */
  @Id
  long commitTime;

  /** Keys that were deleted in this commit. (Saves are recorded in child entities.) */
  Set<Key<?>> deletions = new LinkedHashSet<>();

  public DateTime getCommitTime() {
    return new DateTime(commitTime, UTC);
  }

  public int getBucketId() {
    // The parent key's numeric ID is the bucket number.
    return (int) parent.getId();
  }

  public ImmutableSet<Key<?>> getDeletions() {
    return nullToEmptyImmutableCopy(deletions);
  }

  public static CommitLogManifest create(
      Key<CommitLogBucket> parent, DateTime commitTime, Set<Key<?>> deletions) {
    CommitLogManifest manifest = new CommitLogManifest();
    manifest.parent = parent;
    manifest.commitTime = commitTime.getMillis();
    manifest.deletions = nullToEmptyImmutableCopy(deletions);
    return manifest;
  }

  public static Key<CommitLogManifest> createKey(
      Key<CommitLogBucket> parent, DateTime commitTime) {
    return Key.create(parent, CommitLogManifest.class, commitTime.getMillis());
  }

  /** Returns the commit time encoded into a CommitLogManifest key. */
  public static DateTime extractCommitTime(Key<CommitLogManifest> manifestKey) {
    return new DateTime(manifestKey.getId(), UTC);
  }
}

View file

@ -1,100 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import static com.google.appengine.api.datastore.EntityTranslator.convertToPb;
import static com.google.appengine.api.datastore.EntityTranslator.createFromPbBytes;
import static com.google.common.base.Preconditions.checkNotNull;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.common.annotations.VisibleForTesting;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Parent;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.annotations.NotBackedUp;
import google.registry.model.annotations.NotBackedUp.Reason;
/** Representation of a saved entity in a {@link CommitLogManifest} (not deletes). */
@Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS)
@DeleteAfterMigration
public class CommitLogMutation extends ImmutableObject {
/** The manifest this belongs to. */
@Parent
Key<CommitLogManifest> parent;
/** Serialized web-safe string representation of saved entity key. */
@Id
String entityKey;
/**
 * Raw entity that was saved during the transaction, serialized as a protocol buffer.
 *
 * <p>This value will be written to a GCS file by an export task.
 */
byte[] entityProtoBytes;
/** Returns a defensive copy of the serialized entity bytes. */
public byte[] getEntityProtoBytes() {
return entityProtoBytes.clone();
}
/** Deserializes embedded entity bytes and returns it. */
public com.google.appengine.api.datastore.Entity getEntity() {
return createFromPbBytes(entityProtoBytes);
}
/**
 * Returns a new mutation entity created from an @Entity ImmutableObject instance.
 *
 * <p>The mutation key is generated deterministically from the {@code entity} key. The object is
 * converted to a raw Datastore Entity, serialized to bytes, and stored within the mutation.
 */
public static CommitLogMutation create(Key<CommitLogManifest> parent, Object entity) {
return createFromRaw(parent, auditedOfy().saveIgnoringReadOnlyWithBackup().toEntity(entity));
}
/**
 * Returns a new mutation entity created from a raw Datastore Entity instance.
 *
 * <p>The mutation key is generated deterministically from the {@code entity} key. The Entity
 * itself is serialized to bytes and stored within the returned mutation.
 */
@VisibleForTesting
public static CommitLogMutation createFromRaw(
Key<CommitLogManifest> parent,
com.google.appengine.api.datastore.Entity rawEntity) {
CommitLogMutation instance = new CommitLogMutation();
instance.parent = checkNotNull(parent);
// TODO(b/207516684): figure out if this should be converted to a vkey string via stringify()
// Creates a web-safe key string.
instance.entityKey = KeyFactory.keyToString(rawEntity.getKey());
instance.entityProtoBytes = convertToPb(rawEntity).toByteArray();
return instance;
}
/** Returns the key of a mutation based on the {@code entityKey} of the entity it stores. */
public static
Key<CommitLogMutation> createKey(Key<CommitLogManifest> parent, Key<?> entityKey) {
// TODO(b/207516684): figure out if the return type needs to be VKey and
// if the string used to create a key should remain the same
return Key.create(parent, CommitLogMutation.class, entityKey.getString());
}
}

View file

@ -15,29 +15,12 @@
package google.registry.model.ofy;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Maps.filterKeys;
import static com.google.common.collect.Sets.difference;
import static com.google.common.collect.Sets.union;
import static google.registry.model.ofy.CommitLogBucket.loadBucket;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.util.DateTimeUtils.isBeforeOrAt;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.BackupGroupRoot;
import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.util.Clock;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.function.Supplier;
import org.joda.time.DateTime;
/** Wrapper for {@link Supplier} that associates a time with each attempt. */
@DeleteAfterMigration
@ -57,17 +40,6 @@ public class CommitLoggedWork<R> implements Runnable {
*/
private R result;
/**
* Temporary place to store the key of the commit log manifest.
*
* <p>We can use this to determine whether a transaction that failed with a
* {@link com.google.appengine.api.datastore.DatastoreTimeoutException} actually succeeded. If
* the manifest exists, and if the contents of the commit log are what we expected to have saved,
* then the transaction committed. If the manifest does not exist, then the transaction failed and
* is retryable.
*/
protected CommitLogManifest manifest;
/**
* Temporary place to store the mutations belonging to the commit log manifest.
*
@ -96,16 +68,6 @@ public class CommitLoggedWork<R> implements Runnable {
return result;
}
CommitLogManifest getManifest() {
checkState(runCalled, "Cannot call getManifest() before run()");
return manifest;
}
ImmutableSet<ImmutableObject> getMutations() {
checkState(runCalled, "Cannot call getMutations() before run()");
return mutations;
}
@Override
public void run() {
// The previous time will generally be null, except when using transactNew.
@ -114,90 +76,9 @@ public class CommitLoggedWork<R> implements Runnable {
try {
Ofy.TRANSACTION_INFO.set(createNewTransactionInfo());
result = work.get();
saveCommitLog(Ofy.TRANSACTION_INFO.get());
} finally {
Ofy.TRANSACTION_INFO.set(previous);
}
runCalled = true;
}
/** Records all mutations enrolled by this transaction to a {@link CommitLogManifest} entry. */
private void saveCommitLog(TransactionInfo info) {
ImmutableSet<Key<?>> touchedKeys = info.getTouchedKeys();
if (touchedKeys.isEmpty()) {
// Nothing was saved or deleted in this transaction, so there is nothing to log.
return;
}
CommitLogBucket bucket = loadBucket(info.bucketKey);
// Enforce unique monotonic property on CommitLogBucket.getLastWrittenTime().
if (isBeforeOrAt(info.transactionTime, bucket.getLastWrittenTime())) {
throw new TimestampInversionException(info.transactionTime, bucket.getLastWrittenTime());
}
// The keys read by Objectify during this transaction. This won't include the keys of
// asynchronous save and delete operations that haven't been reaped, but that's ok because we
// already logged all of those keys in {@link TransactionInfo} and now just need to figure out
// what was loaded.
ImmutableSet<Key<?>> keysInSessionCache = auditedOfy().getSessionKeys();
Map<Key<BackupGroupRoot>, BackupGroupRoot> rootsForTouchedKeys =
getBackupGroupRoots(touchedKeys);
Map<Key<BackupGroupRoot>, BackupGroupRoot> rootsForUntouchedKeys =
getBackupGroupRoots(difference(keysInSessionCache, touchedKeys));
// Check the update timestamps of all keys in the transaction, whether touched or merely read.
checkBackupGroupRootTimestamps(
info.transactionTime,
union(rootsForUntouchedKeys.entrySet(), rootsForTouchedKeys.entrySet()));
// Find any BGRs that have children which were touched but were not themselves touched.
Set<BackupGroupRoot> untouchedRootsWithTouchedChildren =
ImmutableSet.copyOf(filterKeys(rootsForTouchedKeys, not(in(touchedKeys))).values());
manifest = CommitLogManifest.create(info.bucketKey, info.transactionTime, info.getDeletes());
final Key<CommitLogManifest> manifestKey = Key.create(manifest);
// Each save in this transaction becomes a CommitLogMutation child of the manifest.
// NOTE(review): untouched roots with touched children are also included as mutations and
// re-saved below — presumably to roll their update timestamps forward; confirm.
mutations =
union(info.getSaves(), untouchedRootsWithTouchedChildren)
.stream()
.map(entity -> (ImmutableObject) CommitLogMutation.create(manifestKey, entity))
.collect(toImmutableSet());
// Persist the manifest, its mutations, and the bucket (with its last-written time rolled
// forward) without backup; commit-log entities are themselves marked @NotBackedUp.
auditedOfy()
.saveIgnoringReadOnlyWithoutBackup()
.entities(
new ImmutableSet.Builder<>()
.add(manifest)
.add(bucket.asBuilder().setLastWrittenTime(info.transactionTime).build())
.addAll(mutations)
.addAll(untouchedRootsWithTouchedChildren)
.build())
.now();
}
/**
 * Verifies that every {@link BackupGroupRoot} involved in the transaction was last updated
 * strictly before the transaction time, throwing {@link TimestampInversionException} otherwise.
 */
private void checkBackupGroupRootTimestamps(
DateTime transactionTime, Set<Entry<Key<BackupGroupRoot>, BackupGroupRoot>> bgrEntries) {
ImmutableMap.Builder<Key<BackupGroupRoot>, DateTime> futureStamped = new ImmutableMap.Builder<>();
for (Entry<Key<BackupGroupRoot>, BackupGroupRoot> bgrEntry : bgrEntries) {
DateTime lastUpdate = bgrEntry.getValue().getUpdateTimestamp().getTimestamp();
// A root stamped at or after the transaction time indicates a timestamp inversion.
if (!lastUpdate.isBefore(transactionTime)) {
futureStamped.put(bgrEntry.getKey(), lastUpdate);
}
}
ImmutableMap<Key<BackupGroupRoot>, DateTime> problematicRoots = futureStamped.build();
if (!problematicRoots.isEmpty()) {
throw new TimestampInversionException(transactionTime, problematicRoots);
}
}
/** Find the set of {@link BackupGroupRoot} ancestors of the given keys. */
private Map<Key<BackupGroupRoot>, BackupGroupRoot> getBackupGroupRoots(Iterable<Key<?>> keys) {
Set<Key<BackupGroupRoot>> rootKeys = new HashSet<>();
for (Key<?> key : keys) {
// Walk up the ancestor chain until the entity class is a BackupGroupRoot subtype
// (or we run out of parents, meaning this key has no such root).
while (key != null
&& !BackupGroupRoot.class.isAssignableFrom(
auditedOfy().factory().getMetadata(key).getEntityClass())) {
key = key.getParent();
}
if (key != null) {
// Safe: the loop above only exits with a non-null key when its entity class is
// assignable to BackupGroupRoot.
@SuppressWarnings("unchecked")
Key<BackupGroupRoot> rootKey = (Key<BackupGroupRoot>) key;
rootKeys.add(rootKey);
}
}
// Batch-load all of the distinct root entities.
return ImmutableMap.copyOf(auditedOfy().load().keys(rootKeys));
}
}

View file

@ -1,63 +0,0 @@
// Copyright 2020 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.ofy;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import google.registry.model.annotations.DeleteAfterMigration;
/**
 * Contains the mapping from class names to SQL-replay-write priorities.
 *
 * <p>When replaying Datastore commit logs to SQL (asynchronous replication), entity writes must be
 * ordered so that a foreign key reference is always written after the entity it points at. This
 * class encodes that DAG as integer priorities: lower values are written earlier (and deleted
 * later). A higher-valued class may hold foreign keys on a lower-valued class, but never the
 * reverse.
 */
@DeleteAfterMigration
public class EntityWritePriorities {
/**
 * Mapping from class name to "priority".
 *
 * <p>"Priority" is the order in which instances of a class should be inserted / updated in a
 * transaction relative to instances of other classes; any class not listed here defaults to
 * zero.
 *
 * <p>Within a transaction, classes are written from the lowest number to the highest to keep
 * foreign-key writes consistent; for the same reason, deletes happen after all writes.
 */
static final ImmutableMap<String, Integer> CLASS_PRIORITIES =
ImmutableMap.of(
"AllocationToken", -9,
"ContactResource", 8,
"HostResource", 9,
"DomainBase", 10,
"HistoryEntry", 20);
// Start of the priority range reserved for deletes. It must exceed every value in
// CLASS_PRIORITIES with enough headroom to absorb any negative entries. By design, deletions
// happen in the reverse order of insertions so foreign keys are never violated mid-delete.
@VisibleForTesting static final int DELETE_RANGE = Integer.MAX_VALUE / 2;
/** Returns the write (or delete) priority for the given entity kind. */
public static int getEntityPriority(String kind, boolean isDelete) {
int basePriority = CLASS_PRIORITIES.getOrDefault(kind, 0);
if (isDelete) {
// Mirror the insert order: classes inserted later are deleted earlier.
return DELETE_RANGE - basePriority;
}
return basePriority;
}
}

View file

@ -38,7 +38,6 @@ import google.registry.model.ImmutableObject;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.translators.BloomFilterOfStringTranslatorFactory;
import google.registry.model.translators.CidrAddressBlockTranslatorFactory;
import google.registry.model.translators.CommitLogRevisionsTranslatorFactory;
import google.registry.model.translators.CreateAutoTimestampTranslatorFactory;
import google.registry.model.translators.CurrencyUnitTranslatorFactory;
import google.registry.model.translators.DurationTranslatorFactory;
@ -124,7 +123,6 @@ public class ObjectifyService {
ImmutableList.of(
new BloomFilterOfStringTranslatorFactory(),
new CidrAddressBlockTranslatorFactory(),
new CommitLogRevisionsTranslatorFactory(),
new CreateAutoTimestampTranslatorFactory(),
new CurrencyUnitTranslatorFactory(),
new DurationTranslatorFactory(),

View file

@ -19,11 +19,9 @@ import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.Maps.uniqueIndex;
import static com.googlecode.objectify.ObjectifyService.ofy;
import static google.registry.config.RegistryConfig.getBaseOfyRetryDuration;
import static google.registry.util.CollectionUtils.union;
import com.google.appengine.api.datastore.DatastoreFailureException;
import com.google.appengine.api.datastore.DatastoreTimeoutException;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
@ -238,8 +236,7 @@ public class Ofy {
/** Pause the current transaction (if any) and complete this one before returning to it. */
public <R> R transactNew(Supplier<R> work) {
// Wrap the Work in a CommitLoggedWork so that we can give transactions a frozen view of time
// and maintain commit logs for them.
// Wrap the Work in a CommitLoggedWork so that we can give transactions a frozen view of time.
return transactCommitLoggedWork(new CommitLoggedWork<>(work, getClock()));
}
@ -260,7 +257,7 @@ public class Ofy {
/**
* Transact with commit logs and retry with exponential backoff.
*
* <p>This method is broken out from {@link #transactNew(Work)} for testing purposes.
* <p>This method is broken out from {@link #transactNew(Supplier)} for testing purposes.
*/
@VisibleForTesting
<R> R transactCommitLoggedWork(CommitLoggedWork<R> work) {
@ -282,7 +279,7 @@ public class Ofy {
// TimestampInversionExceptions are thrown by our code and are always retryable as well.
// However, Datastore exceptions might get thrown even if the transaction succeeded.
if ((e instanceof DatastoreTimeoutException || e instanceof DatastoreFailureException)
&& checkIfAlreadySucceeded(work)) {
&& work.hasRun()) {
return work.getResult();
}
if (attempt == NUM_RETRIES) {
@ -295,31 +292,6 @@ public class Ofy {
}
}
/**
 * We can determine whether a transaction has succeeded by trying to read the commit log back in
 * its own retryable read-only transaction.
 */
private <R> Boolean checkIfAlreadySucceeded(final CommitLoggedWork<R> work) {
return work.hasRun() && transactNewReadOnly(() -> {
CommitLogManifest manifest = work.getManifest();
if (manifest == null) {
// Work ran but no commit log was created. This might mean that the transaction did not
// write anything to Datastore. We can safely retry because it only reads. (Although the
// transaction might have written a task to a queue, we consider that safe to retry too
// since we generally assume that tasks might be doubly executed.) Alternatively it
// might mean that the transaction wrote to Datastore but turned off commit logs by
// exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we
// have no hard proof that retrying is safe, we use these methods judiciously and it is
// reasonable to assume that if the transaction really did succeed that the retry will
// either be idempotent or will fail with a non-transient error.
return false;
}
// The transaction committed iff everything stored under the manifest in Datastore is
// exactly the manifest itself plus the mutations the work intended to write.
return Objects.equals(
union(work.getMutations(), manifest),
ImmutableSet.copyOf(load().ancestor(manifest)));
});
}
/** A read-only transaction is useful to get strongly consistent reads at a shared timestamp. */
<R> R transactNewReadOnly(Supplier<R> work) {
ReadOnlyWork<R> readOnlyWork = new ReadOnlyWork<>(work, getClock());
@ -381,23 +353,6 @@ public class Ofy {
return TRANSACTION_INFO.get().transactionTime;
}
/** Returns key of {@link CommitLogManifest} that will be saved when the transaction ends. */
public Key<CommitLogManifest> getCommitLogManifestKey() {
assertInTransaction();
TransactionInfo info = TRANSACTION_INFO.get();
return Key.create(info.bucketKey, CommitLogManifest.class, info.transactionTime.getMillis());
}
/** Convert an entity POJO to a datastore Entity. */
public Entity toEntity(Object pojo) {
return ofy().save().toEntity(pojo);
}
/** Convert a datastore entity to a POJO. */
public Object toPojo(Entity entity) {
return ofy().load().fromEntity(entity);
}
/**
* Returns the @Entity-annotated base class for an object that is either an {@code Key<?>} or an
* object of an entity class registered with Objectify.

View file

@ -15,16 +15,10 @@
package google.registry.model.ofy;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Predicates.not;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static com.google.common.collect.Maps.filterValues;
import static com.google.common.collect.Maps.toMap;
import static google.registry.model.ofy.CommitLogBucket.getArbitraryBucketId;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.googlecode.objectify.Key;
import google.registry.model.annotations.DeleteAfterMigration;
import java.util.Map;
@ -45,9 +39,6 @@ public class TransactionInfo {
/** Whether this is a read-only transaction. */
private boolean readOnly;
/** Bucket shard to under which commit log will be stored, chosen at random (in production). */
final Key<CommitLogBucket> bucketKey = CommitLogBucket.getBucketKey(getArbitraryBucketId());
/**
* Accumulator of save/delete operations performed in transaction.
*
@ -59,7 +50,6 @@ public class TransactionInfo {
TransactionInfo(DateTime now) {
this.transactionTime = now;
auditedOfy().load().key(bucketKey); // Asynchronously load value into session cache.
}
TransactionInfo setReadOnly() {
@ -80,26 +70,4 @@ public class TransactionInfo {
assertNotReadOnly();
changesBuilder.putAll(toMap(keys, k -> Delete.SENTINEL));
}
/** Returns every key that was saved or deleted in this transaction. */
ImmutableSet<Key<?>> getTouchedKeys() {
ImmutableMap<Key<?>, Object> changes = changesBuilder.build();
return ImmutableSet.copyOf(changes.keySet());
}
/** Returns the map of each touched key to its saved value (or the delete sentinel for deletes). */
ImmutableMap<Key<?>, Object> getChanges() {
return changesBuilder.build();
}
/** Returns the keys deleted in this transaction (entries whose value is the delete sentinel). */
ImmutableSet<Key<?>> getDeletes() {
ImmutableMap<Key<?>, Object> changes = changesBuilder.build();
return ImmutableSet.copyOf(filterValues(changes, Delete.SENTINEL::equals).keySet());
}
/** Returns the entities saved in this transaction (every recorded value except delete sentinels). */
ImmutableSet<Object> getSaves() {
return changesBuilder.build().values().stream()
.filter(value -> !Delete.SENTINEL.equals(value))
.collect(toImmutableSet());
}
}

View file

@ -1,79 +0,0 @@
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.model.translators;
import static com.google.common.base.MoreObjects.firstNonNull;
import static google.registry.config.RegistryConfig.getCommitLogDatastoreRetention;
import static google.registry.model.ofy.ObjectifyService.auditedOfy;
import static google.registry.persistence.transaction.TransactionManagerFactory.ofyTm;
import static google.registry.util.DateTimeUtils.START_OF_TIME;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Ordering;
import com.googlecode.objectify.Key;
import google.registry.model.annotations.DeleteAfterMigration;
import google.registry.model.ofy.CommitLogManifest;
import org.joda.time.DateTime;
/**
* Objectify translator for {@code ImmutableSortedMap<DateTime, Key<CommitLogManifest>>} fields.
*
* <p>This translator is responsible for doing three things:
*
* <ol>
* <li>Translating the data into two lists of {@code Date} and {@code Key} objects, in a manner
* similar to {@code @Mapify}.
* <li>Inserting a key to the transaction's {@link CommitLogManifest} on save.
* <li>Truncating the map to include only the last key per day for the last 30 days.
* </ol>
*
* <p>This allows you to have a field on your model object that tracks historical revisions of
* itself, which can be binary searched for point-in-time restoration.
*
* <p><b>Warning:</b> Fields of this type must not be {@code null}, or else new entries can't be
* inserted. You must take care to initialize the field to empty.
*
* @see google.registry.model.EppResource
*/
@DeleteAfterMigration
public final class CommitLogRevisionsTranslatorFactory
extends ImmutableSortedMapTranslatorFactory<DateTime, Key<CommitLogManifest>> {
/**
 * Add a reference to the current commit log to the resource's revisions map.
 *
 * <p>This method also prunes the revisions map. It guarantees to keep enough data so that floor
 * will work going back N days. It does this by making sure one entry exists before that duration,
 * and pruning everything after it. The size of the map is guaranteed to never exceed N+2.
 *
 * <p>We store a maximum of one entry per day. It will be the last transaction that happened on
 * that day.
 *
 * <p>In serialization mode, this method just returns "revisions" without modification.
 *
 * @see google.registry.config.RegistryConfig#getCommitLogDatastoreRetention()
 */
@Override
ImmutableSortedMap<DateTime, Key<CommitLogManifest>> transformBeforeSave(
ImmutableSortedMap<DateTime, Key<CommitLogManifest>> revisions) {
DateTime now = ofyTm().getTransactionTime();
DateTime threshold = now.minus(getCommitLogDatastoreRetention());
// Newest entry at or before the retention threshold; START_OF_TIME if none exists yet.
DateTime preThresholdTime = firstNonNull(revisions.floorKey(threshold), START_OF_TIME);
// Keep [preThresholdTime, start of today); earlier entries from today are dropped because
// the entry added below becomes today's (sole, latest) revision.
return new ImmutableSortedMap.Builder<DateTime, Key<CommitLogManifest>>(Ordering.natural())
.putAll(revisions.subMap(preThresholdTime, true, now.withTimeAtStartOfDay(), false))
.put(now, auditedOfy().getCommitLogManifestKey())
.build();
}
}

View file

@ -16,24 +16,18 @@ package google.registry.module.backend;
import dagger.Module;
import dagger.Subcomponent;
import google.registry.backup.BackupModule;
import google.registry.backup.CommitLogCheckpointAction;
import google.registry.backup.DeleteOldCommitLogsAction;
import google.registry.backup.ExportCommitLogDiffAction;
import google.registry.batch.BatchModule;
import google.registry.batch.DeleteExpiredDomainsAction;
import google.registry.batch.DeleteLoadTestDataAction;
import google.registry.batch.DeleteProberDataAction;
import google.registry.batch.ExpandRecurringBillingEventsAction;
import google.registry.batch.RelockDomainAction;
import google.registry.batch.ResaveAllEppResourcesAction;
import google.registry.batch.ResaveAllEppResourcesPipelineAction;
import google.registry.batch.ResaveEntityAction;
import google.registry.batch.SendExpiringCertificateNotificationEmailAction;
import google.registry.batch.WipeOutCloudSqlAction;
import google.registry.batch.WipeOutContactHistoryPiiAction;
import google.registry.batch.WipeoutDatastoreAction;
import google.registry.cron.CommitLogFanoutAction;
import google.registry.cron.CronModule;
import google.registry.cron.TldFanoutAction;
import google.registry.dns.DnsModule;
@ -58,7 +52,6 @@ import google.registry.export.sheet.SheetModule;
import google.registry.export.sheet.SyncRegistrarsSheetAction;
import google.registry.flows.FlowComponent;
import google.registry.flows.custom.CustomLogicModule;
import google.registry.mapreduce.MapreduceModule;
import google.registry.monitoring.whitebox.WhiteboxModule;
import google.registry.rdap.UpdateRegistrarRdapBaseUrlsAction;
import google.registry.rde.BrdaCopyAction;
@ -93,7 +86,6 @@ import google.registry.tmch.TmchSmdrlAction;
@RequestScope
@Subcomponent(
modules = {
BackupModule.class,
BatchModule.class,
BillingModule.class,
CloudDnsWriterModule.class,
@ -105,7 +97,6 @@ import google.registry.tmch.TmchSmdrlAction;
DnsUpdateWriterModule.class,
ExportRequestModule.class,
IcannReportingModule.class,
MapreduceModule.class,
RdeModule.class,
ReportingModule.class,
RequestModule.class,
@ -125,24 +116,16 @@ interface BackendRequestComponent {
CheckBackupAction checkBackupAction();
CommitLogCheckpointAction commitLogCheckpointAction();
CommitLogFanoutAction commitLogFanoutAction();
CopyDetailReportsAction copyDetailReportAction();
DeleteExpiredDomainsAction deleteExpiredDomainsAction();
DeleteLoadTestDataAction deleteLoadTestDataAction();
DeleteOldCommitLogsAction deleteOldCommitLogsAction();
DeleteProberDataAction deleteProberDataAction();
ExpandRecurringBillingEventsAction expandRecurringBillingEventsAction();
ExportCommitLogDiffAction exportCommitLogDiffAction();
ExportDomainListsAction exportDomainListsAction();
ExportPremiumTermsAction exportPremiumTermsAction();
@ -181,8 +164,6 @@ interface BackendRequestComponent {
RelockDomainAction relockDomainAction();
ResaveAllEppResourcesAction resaveAllEppResourcesAction();
ResaveAllEppResourcesPipelineAction resaveAllEppResourcesPipelineAction();
ResaveEntityAction resaveEntityAction();

View file

@ -16,23 +16,18 @@ package google.registry.module.tools;
import dagger.Module;
import dagger.Subcomponent;
import google.registry.backup.BackupModule;
import google.registry.backup.RestoreCommitLogsAction;
import google.registry.dns.DnsModule;
import google.registry.flows.EppToolAction;
import google.registry.flows.EppToolAction.EppToolModule;
import google.registry.flows.FlowComponent;
import google.registry.loadtest.LoadTestAction;
import google.registry.loadtest.LoadTestModule;
import google.registry.mapreduce.MapreduceModule;
import google.registry.monitoring.whitebox.WhiteboxModule;
import google.registry.request.RequestComponentBuilder;
import google.registry.request.RequestModule;
import google.registry.request.RequestScope;
import google.registry.tools.server.CreateGroupsAction;
import google.registry.tools.server.GenerateZoneFilesAction;
import google.registry.tools.server.KillAllCommitLogsAction;
import google.registry.tools.server.KillAllEppResourcesAction;
import google.registry.tools.server.ListDomainsAction;
import google.registry.tools.server.ListHostsAction;
import google.registry.tools.server.ListPremiumListsAction;
@ -40,7 +35,6 @@ import google.registry.tools.server.ListRegistrarsAction;
import google.registry.tools.server.ListReservedListsAction;
import google.registry.tools.server.ListTldsAction;
import google.registry.tools.server.RefreshDnsForAllDomainsAction;
import google.registry.tools.server.ResaveAllHistoryEntriesAction;
import google.registry.tools.server.ToolsServerModule;
import google.registry.tools.server.VerifyOteAction;
@ -48,11 +42,9 @@ import google.registry.tools.server.VerifyOteAction;
@RequestScope
@Subcomponent(
modules = {
BackupModule.class,
DnsModule.class,
EppToolModule.class,
LoadTestModule.class,
MapreduceModule.class,
RequestModule.class,
ToolsServerModule.class,
WhiteboxModule.class,
@ -62,8 +54,6 @@ interface ToolsRequestComponent {
EppToolAction eppToolAction();
FlowComponent.Builder flowComponentBuilder();
GenerateZoneFilesAction generateZoneFilesAction();
KillAllCommitLogsAction killAllCommitLogsAction();
KillAllEppResourcesAction killAllEppResourcesAction();
ListDomainsAction listDomainsAction();
ListHostsAction listHostsAction();
ListPremiumListsAction listPremiumListsAction();
@ -72,8 +62,6 @@ interface ToolsRequestComponent {
ListTldsAction listTldsAction();
LoadTestAction loadTestAction();
RefreshDnsForAllDomainsAction refreshDnsForAllDomainsAction();
ResaveAllHistoryEntriesAction resaveAllHistoryEntriesAction();
RestoreCommitLogsAction restoreCommitLogsAction();
VerifyOteAction verifyOteAction();
@Subcomponent.Builder

View file

@ -49,9 +49,9 @@ import org.joda.time.DateTime;
/**
* Action that re-encrypts a BRDA escrow deposit and puts it into the upload bucket.
*
* <p>This action is run by the mapreduce for each BRDA staging file it generates. The staging file
* is encrypted with our internal {@link Ghostryde} encryption. We then re-encrypt it as a RyDE
* file, which is what the third-party escrow provider understands.
* <p>This action is run for each BRDA staging file it generates. The staging file is encrypted with
* our internal {@link Ghostryde} encryption. We then re-encrypt it as a RyDE file, which is what
* the third-party escrow provider understands.
*
* <p>Then we put the RyDE file (along with our digital signature) into the configured BRDA bucket.
* This bucket is special because a separate script will rsync it to the third party escrow provider

View file

@ -39,17 +39,17 @@ import org.joda.time.Duration;
* <p>This class is called by {@link RdeStagingAction} at the beginning of its execution. Since it
* stages everything in a single run, it needs to know what's awaiting deposit.
*
* <p>We start off by getting the list of TLDs with escrow enabled. We then check {@code cursor}
to see when it was due for a deposit. If that's in the past, then we know that we need
* to generate a deposit. If it's really far in the past, we might have to generate multiple
* deposits for that TLD, based on the configured interval.
* <p>We start off by getting the list of TLDs with escrow enabled. We then check {@code cursor} to
* see when it was due for a deposit. If that's in the past, then we know that we need to generate a
* deposit. If it's really far in the past, we might have to generate multiple deposits for that
* TLD, based on the configured interval.
*
* <p><i>However</i> we will only generate one interval forward per mapreduce, since the reduce
* phase rolls forward a TLD's cursor, and we can't have that happening in parallel.
* <p><i>However</i> we will only generate one interval forward per run, since the reduce phase
* rolls forward a TLD's cursor, and we can't have that happening in parallel.
*
* <p>If no deposits have been made so far, then {@code startingPoint} is used as the watermark
* of the next deposit. If that's a day in the future, then escrow won't start until that date.
* This first deposit time will be set to Datastore in a transaction.
* <p>If no deposits have been made so far, then {@code startingPoint} is used as the watermark of
* the next deposit. If that's a day in the future, then escrow won't start until that date. This
* first deposit time will be set to Datastore in a transaction.
*/
public final class PendingDepositChecker {

View file

@ -16,7 +16,6 @@ package google.registry.rde;
import static com.google.common.collect.ImmutableSetMultimap.toImmutableSetMultimap;
import static google.registry.beam.BeamUtils.createJobName;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import static google.registry.request.Action.Method.GET;
import static google.registry.request.Action.Method.POST;
import static google.registry.xml.ValidationMode.LENIENT;
@ -44,10 +43,6 @@ import google.registry.config.RegistryConfig.Config;
import google.registry.config.RegistryEnvironment;
import google.registry.gcs.GcsUtils;
import google.registry.keyring.api.KeyModule.Key;
import google.registry.mapreduce.MapreduceRunner;
import google.registry.mapreduce.inputs.EppResourceInputs;
import google.registry.mapreduce.inputs.NullInput;
import google.registry.model.EppResource;
import google.registry.model.common.Cursor;
import google.registry.model.common.Cursor.CursorType;
import google.registry.model.contact.ContactResource;
@ -72,29 +67,14 @@ import org.joda.time.DateTime;
import org.joda.time.Duration;
/**
* Action that kicks off either a MapReduce (for Datastore) or Dataflow (for Cloud SQL) job to stage
* escrow deposit XML files on GCS for RDE/BRDA for all TLDs.
* Action that kicks off a Dataflow job to stage escrow deposit XML files on GCS for RDE/BRDA for
* all TLDs.
*
* <h3>Pending Deposits</h3>
*
* <p>This task starts by asking {@link PendingDepositChecker} which deposits need to be generated.
* If there's nothing to deposit, we return 204 No Content; otherwise, we fire off a MapReduce job
* and redirect to its status GUI. The task can also be run in manual operation, as described below.
*
* <h3>MapReduce</h3>
*
* <p>The mapreduce job scans every {@link EppResource} in Datastore. It maps a point-in-time
* representation of each entity to the escrow XML files in which it should appear.
*
* <p>There is one map worker for each {@code EppResourceIndexBucket} entity group shard. There is
* one reduce worker for each deposit being generated.
*
* <p>{@link ContactResource} and {@link HostResource} are emitted on all TLDs, even when the
* domains on a TLD don't reference them. BRDA {@link RdeMode#THIN thin} deposits exclude contacts
* and hosts entirely.
*
* <p>{@link Registrar} entities, both active and inactive, are included in all deposits. They are
 * not rewound point-in-time.
* If there's nothing to deposit, we return 204 No Content; otherwise, we fire off a job and
* redirect to its status GUI. The task can also be run in manual operation, as described below.
*
* <h3>Dataflow</h3>
*
@ -166,10 +146,6 @@ import org.joda.time.Duration;
*
* <h3>Idempotency</h3>
*
* <p>We lock the reduce tasks for the MapReduce job. This is necessary because: a) App Engine tasks
* might get double executed; and b) Cloud Storage file handles get committed on close <i>even if
* our code throws an exception.</i>
*
* <p>For the Dataflow job we do not employ a lock because it is difficult to span a lock across
* three subsequent transforms (save to GCS, roll forward cursor, enqueue next action). Instead, we
* get around the issue by saving the deposit to a unique folder named after the job name so there
@ -250,10 +226,8 @@ public final class RdeStagingAction implements Runnable {
@Inject Clock clock;
@Inject PendingDepositChecker pendingDepositChecker;
@Inject RdeStagingReducer.Factory reducerFactory;
@Inject Response response;
@Inject GcsUtils gcsUtils;
@Inject MapreduceRunner mrRunner;
@Inject @Config("projectId") String projectId;
@Inject @Config("defaultJobRegion") String jobRegion;
@ -270,10 +244,6 @@ public final class RdeStagingAction implements Runnable {
@Inject @Config("rdeBucket") String rdeBucket;
@Inject @Parameter(RdeModule.PARAM_MANUAL) boolean manual;
@Inject
@Parameter(RdeModule.PARAM_BEAM)
boolean beam;
@Inject @Parameter(RdeModule.PARAM_DIRECTORY) Optional<String> directory;
@Inject @Parameter(RdeModule.PARAM_MODE) ImmutableSet<String> modeStrings;
@Inject @Parameter(RequestParameters.PARAM_TLDS) ImmutableSet<String> tlds;
@ -299,85 +269,67 @@ public final class RdeStagingAction implements Runnable {
logger.atInfo().log("Pending deposit: %s", pending);
}
ValidationMode validationMode = lenient ? LENIENT : STRICT;
if (tm().isOfy() && !beam) {
RdeStagingMapper mapper = new RdeStagingMapper(validationMode, pendings);
RdeStagingReducer reducer = reducerFactory.create(validationMode, gcsUtils);
mrRunner
.setJobName("Stage escrow deposits for all TLDs")
.setModuleName("backend")
.setDefaultReduceShards(pendings.size())
.runMapreduce(
mapper,
reducer,
ImmutableList.of(
// Add an extra shard that maps over a null resource. See the mapper code for why.
new NullInput<>(), EppResourceInputs.createEntityInput(EppResource.class)))
.sendLinkToMapreduceConsole(response);
} else {
ImmutableList.Builder<String> jobNameBuilder = new ImmutableList.Builder<>();
pendings.values().stream()
.collect(toImmutableSetMultimap(PendingDeposit::watermark, identity()))
.asMap()
.forEach(
(watermark, pendingDeposits) -> {
try {
LaunchFlexTemplateParameter parameter =
new LaunchFlexTemplateParameter()
.setJobName(
createJobName(
String.format(
"rde-%s", watermark.toString("yyyy-MM-dd't'HH-mm-ss'z'")),
clock))
.setContainerSpecGcsPath(
String.format("%s/%s_metadata.json", stagingBucketUrl, PIPELINE_NAME))
.setParameters(
new ImmutableMap.Builder<String, String>()
.put(
"pendings",
RdePipeline.encodePendingDeposits(
ImmutableSet.copyOf(pendingDeposits)))
.put("validationMode", validationMode.name())
.put("rdeStagingBucket", rdeBucket)
.put(
"stagingKey",
BaseEncoding.base64Url()
.omitPadding()
.encode(stagingKeyBytes))
.put("registryEnvironment", RegistryEnvironment.get().name())
.put("workerMachineType", machineType)
.put("numWorkers", String.valueOf(numWorkers))
.put(
"jpaTransactionManagerType",
JpaTransactionManagerType.READ_ONLY_REPLICA.toString())
// TODO (jianglai): Investigate turning off public IPs (for which
// there is a quota) in order to increase the total number of
// workers allowed (also under quota).
// See:
// https://cloud.google.com/dataflow/docs/guides/routes-firewall
.put("usePublicIps", "true")
.build());
LaunchFlexTemplateResponse launchResponse =
dataflow
.projects()
.locations()
.flexTemplates()
.launch(
projectId,
jobRegion,
new LaunchFlexTemplateRequest().setLaunchParameter(parameter))
.execute();
logger.atInfo().log("Got response: %s", launchResponse.getJob().toPrettyString());
jobNameBuilder.add(launchResponse.getJob().getId());
} catch (IOException e) {
logger.atWarning().withCause(e).log("Pipeline Launch failed");
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setPayload(String.format("Pipeline launch failed: %s", e.getMessage()));
}
});
response.setStatus(SC_OK);
response.setPayload(
String.format("Launched RDE pipeline: %s", Joiner.on(", ").join(jobNameBuilder.build())));
}
ImmutableList.Builder<String> jobNameBuilder = new ImmutableList.Builder<>();
pendings.values().stream()
.collect(toImmutableSetMultimap(PendingDeposit::watermark, identity()))
.asMap()
.forEach(
(watermark, pendingDeposits) -> {
try {
LaunchFlexTemplateParameter parameter =
new LaunchFlexTemplateParameter()
.setJobName(
createJobName(
String.format(
"rde-%s", watermark.toString("yyyy-MM-dd't'HH-mm-ss'z'")),
clock))
.setContainerSpecGcsPath(
String.format("%s/%s_metadata.json", stagingBucketUrl, PIPELINE_NAME))
.setParameters(
new ImmutableMap.Builder<String, String>()
.put(
"pendings",
RdePipeline.encodePendingDeposits(
ImmutableSet.copyOf(pendingDeposits)))
.put("validationMode", validationMode.name())
.put("rdeStagingBucket", rdeBucket)
.put(
"stagingKey",
BaseEncoding.base64Url().omitPadding().encode(stagingKeyBytes))
.put("registryEnvironment", RegistryEnvironment.get().name())
.put("workerMachineType", machineType)
.put("numWorkers", String.valueOf(numWorkers))
.put(
"jpaTransactionManagerType",
JpaTransactionManagerType.READ_ONLY_REPLICA.toString())
// TODO (jianglai): Investigate turning off public IPs (for which
// there is a quota) in order to increase the total number of
// workers allowed (also under quota).
// See:
// https://cloud.google.com/dataflow/docs/guides/routes-firewall
.put("usePublicIps", "true")
.build());
LaunchFlexTemplateResponse launchResponse =
dataflow
.projects()
.locations()
.flexTemplates()
.launch(
projectId,
jobRegion,
new LaunchFlexTemplateRequest().setLaunchParameter(parameter))
.execute();
logger.atInfo().log("Got response: %s", launchResponse.getJob().toPrettyString());
jobNameBuilder.add(launchResponse.getJob().getId());
} catch (IOException e) {
logger.atWarning().withCause(e).log("Pipeline Launch failed");
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setPayload(String.format("Pipeline launch failed: %s", e.getMessage()));
}
});
response.setStatus(SC_OK);
response.setPayload(
String.format("Launched RDE pipeline: %s", Joiner.on(", ").join(jobNameBuilder.build())));
}
private ImmutableSetMultimap<String, PendingDeposit> getStandardPendingDeposits() {

Some files were not shown because too many files have changed in this diff Show more