Mirror of https://github.com/google/nomulus.git, synced 2025-08-16 06:24:07 +02:00

Commit e41fd7877e: Remove datastore related code (#1906)
Parent: 913edb23ee
152 changed files with 886 additions and 4460 deletions
@@ -374,7 +374,6 @@ class InvoicingPipelineTest {
   }
 
   private static void setupCloudSql() {
-    // Populate billing events in Cloud SQL to match existing test data for Datastore
     persistNewRegistrar("NewRegistrar");
     persistNewRegistrar("TheRegistrar");
     Registrar registrar1 = persistNewRegistrar("theRegistrar");

@@ -75,9 +75,9 @@ import org.junit.jupiter.api.io.TempDir;
 /**
  * Unit tests for {@link Spec11Pipeline}.
  *
- * <p>Unfortunately there is no emulator for BigQuery like that for Datastore or App Engine.
- * Therefore we cannot fully test the pipeline but only test the two separate sink IO functions,
- * assuming that date is sourcede correctly the {@code BigQueryIO}.
+ * <p>Unfortunately there is no emulator for BigQuery, so we cannot fully test the pipeline but only
+ * test the two separate sink IO functions, assuming that date is sourced correctly the {@code
+ * BigQueryIO}.
  */
 class Spec11PipelineTest {
 
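The Javadoc above describes the testing approach this commit keeps: with no BigQuery emulator available, the pipeline's source and sink transforms are exercised individually on a local test pipeline with fixed inputs. A minimal sketch of that pattern using only generic Beam test utilities follows; the transform and data are placeholders, not the real Spec11 code.

```java
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.TypeDescriptors;
import org.junit.Rule;
import org.junit.Test;

public class SinkTransformSketchTest {

  // Runs the pipeline graph locally; no BigQuery (or emulator) is involved.
  @Rule public final TestPipeline pipeline = TestPipeline.create();

  @Test
  public void transformProducesExpectedOutput() {
    // Feed fixed elements instead of reading them through BigQueryIO.
    PCollection<String> input = pipeline.apply(Create.of("a.tld", "b.tld"));

    // Placeholder transform standing in for the sink/source step under test.
    PCollection<String> output =
        input.apply(MapElements.into(TypeDescriptors.strings()).via((String s) -> "domain:" + s));

    // Assert on the transform's output without touching any external service.
    PAssert.that(output).containsInAnyOrder("domain:a.tld", "domain:b.tld");
    pipeline.run().waitUntilFinish();
  }
}
```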
@@ -214,7 +214,7 @@ class Spec11PipelineTest {
   }
 
   @Test
-  void testSuccess_readFromCloudSql() throws Exception {
+  void testSuccess_readFromCloudSql() {
     setupCloudSql();
     PCollection<DomainNameInfo> domainNameInfos = Spec11Pipeline.readFromCloudSql(pipeline);
     PAssert.that(domainNameInfos).containsInAnyOrder(DOMAIN_NAME_INFOS);
@@ -310,7 +310,7 @@ class Spec11PipelineTest {
         new File(
             String.format(
                 "%s/icann/spec11/2020-01/SPEC11_MONTHLY_REPORT_2020-01-27",
-                reportingBucketUrl.getAbsolutePath().toString()));
+                reportingBucketUrl.getAbsolutePath()));
     return ImmutableList.copyOf(
         ResourceUtils.readResourceUtf8(resultFile.toURI().toURL()).split("\n"));
   }
@@ -335,7 +335,6 @@ class Spec11PipelineTest {
           return false;
         }
       }
-      ;
       return true;
     }
   }

@@ -67,7 +67,7 @@ class CheckedBigqueryTest {
   }
 
   @Test
-  void testSuccess_datastoreCreation() throws Exception {
+  void testSuccess_datasetCreation() throws Exception {
     checkedBigquery.ensureDataSetExists("Project-Id", "Dataset-Id");
 
     ArgumentCaptor<Dataset> datasetArg = ArgumentCaptor.forClass(Dataset.class);
@@ -80,7 +80,7 @@ class CheckedBigqueryTest {
   }
 
   @Test
-  void testSuccess_datastoreAndTableCreation() throws Exception {
+  void testSuccess_datasetAndTableCreation() throws Exception {
     checkedBigquery.ensureDataSetAndTableExist("Project-Id", "Dataset2", "Table2");
 
     ArgumentCaptor<Dataset> datasetArg = ArgumentCaptor.forClass(Dataset.class);

@@ -283,7 +283,7 @@ public class CloudDnsWriterTest {
     return recordSetBuilder.build();
   }
 
-  /** Returns a domain to be persisted in Datastore. */
+  /** Returns a domain to be persisted in the database. */
   private static Domain fakeDomain(
       String domainName, ImmutableSet<Host> nameservers, int numDsRecords) {
     ImmutableSet.Builder<DomainDsData> dsDataBuilder = new ImmutableSet.Builder<>();

@@ -790,7 +790,7 @@ class DomainCheckFlowTest extends ResourceCheckFlowTestCase<DomainCheckFlow, Dom
   @Test
   void testSuccess_thirtyDomains_restoreFees() throws Exception {
     // Note that 30 is more than 25, which is the maximum # of entity groups you can enlist in a
-    // single Datastore transaction (each Domain entity is in a separate entity group).
+    // single database transaction (each Domain entity is in a separate entity group).
     // It's also pretty common for registrars to send large domain checks.
     setEppInput("domain_check_fee_thirty_domains.xml");
     // example-00.tld won't exist and thus will not have a renew fee like the others.

@@ -148,7 +148,7 @@ class DomainInfoFlowTest extends ResourceFlowTestCase<DomainInfoFlow, Domain> {
             .setAuthInfo(DomainAuthInfo.create(PasswordAuth.create("2fooBAR")))
             .build());
     // Set the superordinate domain of ns1.example.com to example.com. In reality, this would have
-    // happened in the flow that created it, but here we just overwrite it in Datastore.
+    // happened in the flow that created it, but here we just overwrite it in the database.
     host1 = persistResource(host1.asBuilder().setSuperordinateDomain(domain.createVKey()).build());
     // Create a subordinate host that is not delegated to by anyone.
     host3 =

@@ -306,7 +306,7 @@ class DomainTransferRequestFlowTest
         Stream.of(extraExpectedBillingEvents)
             .map(builder -> builder.setDomainHistory(historyEntryTransferRequest).build())
             .collect(toImmutableSet());
-    // Assert that the billing events we constructed above actually exist in Datastore.
+    // Assert that the billing events we constructed above actually exist in the database.
     ImmutableSet<BillingEvent> expectedBillingEvents =
         Streams.concat(
             Stream.of(losingClientAutorenew, gainingClientAutorenew),

@@ -235,7 +235,7 @@ public class DomainTest {
   @Test
   void testPersistence() {
     // Note that this only verifies that the value stored under the foreign key is the same as that
-    // stored under the primary key ("domain" is the domain loaded from the datastore, not the
+    // stored under the primary key ("domain" is the domain loaded from the the database, not the
     // original domain object).
     assertThat(loadByForeignKey(Domain.class, domain.getForeignKey(), fakeClock.nowUtc()))
         .hasValue(domain);

@@ -36,7 +36,7 @@ public class ServerSecretTest extends EntityTestCase {
   }
 
   @Test
-  void testGet_bootstrapping_savesSecretToDatastore() {
+  void testGet_bootstrapping_savesSecret() {
     ServerSecret secret = ServerSecret.get();
     assertThat(secret).isNotNull();
     assertThat(loadByEntity(new ServerSecret())).isEqualTo(secret);

@@ -104,11 +104,6 @@ public class ReplicaSimulatingJpaTransactionManager implements JpaTransactionMan
         });
   }
 
-  @Override
-  public <T> T transactWithoutBackup(Supplier<T> work) {
-    return transact(work);
-  }
-
   @Override
   public <T> T transactNoRetry(Supplier<T> work) {
     return transact(work);

@@ -36,7 +36,7 @@ import google.registry.testing.DatabaseHelper;
 import java.io.IOException;
 
 /**
- * Datastore fixtures for the development webserver.
+ * Database fixtures for the development webserver.
  *
  * <p><b>Warning:</b> These fixtures aren't really intended for unit tests, since they take upwards
  * of a second to load.
@@ -162,6 +162,6 @@ public enum Fixture {
     }
   };
 
-  /** Loads this fixture into Datastore. */
+  /** Loads this fixture into the database. */
   public abstract void load();
 }

@@ -45,8 +45,8 @@ import org.mortbay.jetty.servlet.ServletHolder;
 /**
  * HTTP server that serves static content and handles servlet requests in the calling thread.
  *
- * <p>Using this server is similar to to other server classes, in that it has {@link #start()} and
- * {@link #stop()} methods. However a {@link #process()} method was added, which is used to process
+ * <p>Using this server is similar to other server classes, in that it has {@link #start()} and
+ * {@link #stop()} methods. However, a {@link #process()} method was added, which is used to process
  * requests made to servlets (not static files) in the calling thread.
  *
  * <p><b>Note:</b> This server is intended for development purposes. For the love all that is good,
@@ -59,11 +59,6 @@ import org.mortbay.jetty.servlet.ServletHolder;
  * inside {@link ServletWrapperDelegatorServlet}. When requests come in, a {@link FutureTask} will
  * be sent back to this class using a {@link LinkedBlockingDeque} message queue. Those messages are
  * then consumed by the {@code process()} method.
- *
- * <p>The reason why this is necessary is because the App Engine local testing services (created by
- * {@code LocalServiceTestHelper}) only apply to a single thread (probably to allow multi-threaded
- * tests). So when Jetty creates random threads to handle requests, they won't have access to the
- * Datastore and other stuff.
  */
 public final class TestServer {
 
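The surviving Javadoc still describes the hand-off mechanism: request threads wrap their work in a FutureTask, push it onto a LinkedBlockingDeque, and the thread that owns the test services runs those tasks from process(). A minimal sketch of that pattern is below; it is illustrative only and not the actual TestServer code.

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingDeque;

/** Sketch of delegating work from request threads onto a single consumer thread. */
public final class RequestDelegator {

  // Request threads enqueue tasks here; the owning thread drains and runs them.
  private final LinkedBlockingDeque<FutureTask<String>> queue = new LinkedBlockingDeque<>();

  /** Called from an arbitrary (e.g. Jetty) request thread. */
  public String submit(String request) throws InterruptedException, ExecutionException {
    FutureTask<String> task = new FutureTask<>(() -> handle(request));
    queue.add(task);
    return task.get(); // blocks until the consumer thread has run the task
  }

  /** Called repeatedly from the single thread that owns the test services. */
  public void process() throws InterruptedException {
    queue.take().run(); // runs the task on this thread, unblocking the submitter
  }

  private String handle(String request) {
    return "handled: " + request; // placeholder for real servlet work
  }
}
```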
@@ -396,10 +396,10 @@ public final class DatabaseHelper {
                 .collect(
                     toImmutableMap(Map.Entry::getKey, entry -> entry.getValue().getValue())))
             .build();
-    // Since we used to persist a PremiumList to Datastore here, it is necessary to allocate an ID
-    // here to prevent breaking some hard-coded flow tests. IDs in tests are allocated in a
-    // strictly increasing sequence, if we don't pad out the ID here, we would have to renumber
-    // hundreds of unit tests.
+    // Since we used to persist a PremiumList here, it is necessary to allocate an ID here to
+    // prevent breaking some hard-coded flow tests. IDs in tests are allocated in a strictly
+    // increasing sequence, if we don't pad out the ID here, we would have to renumber hundreds of
+    // unit tests.
     allocateId();
     PremiumListDao.save(premiumList);
     maybeAdvanceClock();
@@ -837,12 +837,12 @@ public final class DatabaseHelper {
         .containsExactlyElementsIn(expected);
   }
 
-  /** Assert that the expected billing events are exactly the ones found in the fake Datastore. */
+  /** Assert that the expected billing events are exactly the ones found in test database. */
   public static void assertBillingEvents(BillingEvent... expected) {
     assertBillingEventsEqual(getBillingEvents(), asList(expected));
   }
 
-  /** Assert that the expected billing events set is exactly the one found in the fake Datastore. */
+  /** Assert that the expected billing events set is exactly the one found in test database. */
   public static void assertBillingEvents(Set<BillingEvent> expected) {
     assertBillingEventsEqual(getBillingEvents(), expected);
   }
@@ -1321,60 +1321,6 @@ public final class DatabaseHelper {
     return entity;
   }
 
-  /**
-   * Sets a DATASTORE_PRIMARY_NO_ASYNC state on the {@link DatabaseMigrationStateSchedule}.
-   *
-   * <p>In order to allow for tests to manipulate the clock how they need, we start the transitions
-   * one millisecond after the clock's current time (in case the clock's current value is
-   * START_OF_TIME). We then advance the clock one second so that we're in the
-   * DATASTORE_PRIMARY_READ_ONLY phase.
-   *
-   * <p>We must use the current time, otherwise the setting of the migration state will fail due to
-   * an invalid transition.
-   */
-  public static void setMigrationScheduleToDatastorePrimaryNoAsync(FakeClock fakeClock) {
-    DateTime now = fakeClock.nowUtc();
-    tm().transact(
-            () ->
-                DatabaseMigrationStateSchedule.set(
-                    ImmutableSortedMap.of(
-                        START_OF_TIME,
-                        MigrationState.DATASTORE_ONLY,
-                        now.plusMillis(1),
-                        MigrationState.DATASTORE_PRIMARY,
-                        now.plusMillis(2),
-                        MigrationState.DATASTORE_PRIMARY_NO_ASYNC)));
-    fakeClock.advanceBy(Duration.standardSeconds(1));
-  }
-
-  /**
-   * Sets a DATASTORE_PRIMARY_READ_ONLY state on the {@link DatabaseMigrationStateSchedule}.
-   *
-   * <p>In order to allow for tests to manipulate the clock how they need, we start the transitions
-   * one millisecond after the clock's current time (in case the clock's current value is
-   * START_OF_TIME). We then advance the clock one second so that we're in the
-   * DATASTORE_PRIMARY_READ_ONLY phase.
-   *
-   * <p>We must use the current time, otherwise the setting of the migration state will fail due to
-   * an invalid transition.
-   */
-  public static void setMigrationScheduleToDatastorePrimaryReadOnly(FakeClock fakeClock) {
-    DateTime now = fakeClock.nowUtc();
-    tm().transact(
-            () ->
-                DatabaseMigrationStateSchedule.set(
-                    ImmutableSortedMap.of(
-                        START_OF_TIME,
-                        MigrationState.DATASTORE_ONLY,
-                        now.plusMillis(1),
-                        MigrationState.DATASTORE_PRIMARY,
-                        now.plusMillis(2),
-                        MigrationState.DATASTORE_PRIMARY_NO_ASYNC,
-                        now.plusMillis(3),
-                        MigrationState.DATASTORE_PRIMARY_READ_ONLY)));
-    fakeClock.advanceBy(Duration.standardSeconds(1));
-  }
-
   /**
    * Sets a SQL_PRIMARY state on the {@link DatabaseMigrationStateSchedule}.
    *

@@ -144,7 +144,7 @@ class AuthModuleTest {
   }
 
   private Credential getCredential() {
-    // Reconstruct the entire dependency graph, injecting FakeDatastoreFactory and credential
+    // Reconstruct the entire dependency graph, injecting FakeDataStoreFactory and credential
    // parameters.
     JacksonFactory jsonFactory = new JacksonFactory();
     GoogleClientSecrets clientSecrets = getSecrets();

@@ -167,12 +167,11 @@ public abstract class CommandTestCase<C extends Command> {
     return writeToNamedTmpFile("cert.pem", certificateFile);
   }
 
-  /** Reloads the given resource from Datastore. */
   <T> T reloadResource(T resource) {
     return tm().transact(() -> tm().loadByEntity(resource));
   }
 
-  /** Returns count of all poll messages in Datastore. */
+  /** Returns count of all poll messages. */
   int getPollMessageCount() {
     return tm().transact(() -> tm().loadAllOf(PollMessage.class).size());
   }

@@ -15,8 +15,6 @@
package google.registry.tools;

import static com.google.common.truth.Truth.assertThat;
import static google.registry.model.tld.label.ReservationType.FULLY_BLOCKED;
import static google.registry.persistence.transaction.TransactionManagerFactory.tm;
import static google.registry.testing.TestDataHelper.loadFile;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -24,10 +22,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
import com.beust.jcommander.ParameterException;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.google.common.truth.Truth8;
import google.registry.model.tld.label.ReservedList;
import google.registry.model.tld.label.ReservedList.ReservedListEntry;
import google.registry.model.tld.label.ReservedListDao;
import java.io.File;
import java.io.IOException;
import org.joda.time.DateTime;
@@ -109,41 +105,4 @@ abstract class CreateOrUpdateReservedListCommandTestCase<
         .setReservedListMap(labelsToEntries)
         .build();
   }
-
-  ReservedList getCloudSqlReservedList(String name) {
-    return tm().transact(
-            () -> {
-              long revisionId =
-                  tm().query(
-                          "SELECT MAX(rl.revisionId) FROM ReservedList rl WHERE name = :name",
-                          Long.class)
-                      .setParameter("name", name)
-                      .getSingleResult();
-              return tm().query(
-                      "FROM ReservedList WHERE revisionId = :revisionId", ReservedList.class)
-                  .setParameter("revisionId", revisionId)
-                  .getSingleResult();
-            });
-  }
-
-  void verifyXnq9jyb4cInCloudSql() {
-    assertThat(ReservedListDao.checkExists("xn--q9jyb4c_common-reserved")).isTrue();
-    ReservedList persistedList = getCloudSqlReservedList("xn--q9jyb4c_common-reserved");
-    assertThat(persistedList.getName()).isEqualTo("xn--q9jyb4c_common-reserved");
-    assertThat(persistedList.getShouldPublish()).isTrue();
-    assertThat(persistedList.getReservedListEntries())
-        .containsExactly(
-            "baddies",
-            ReservedListEntry.create("baddies", FULLY_BLOCKED, ""),
-            "ford",
-            ReservedListEntry.create("ford", FULLY_BLOCKED, "random comment"));
-  }
-
-  void verifyXnq9jyb4cInDatastore() {
-    Truth8.assertThat(ReservedList.get("xn--q9jyb4c_common-reserved")).isPresent();
-    ReservedList reservedList = ReservedList.get("xn--q9jyb4c_common-reserved").get();
-    assertThat(reservedList.getReservedListEntries()).hasSize(2);
-    Truth8.assertThat(reservedList.getReservationInList("baddies")).hasValue(FULLY_BLOCKED);
-    Truth8.assertThat(reservedList.getReservationInList("ford")).hasValue(FULLY_BLOCKED);
-  }
 }

@@ -168,13 +168,6 @@ class CreateReservedListCommandTest
     runNameTestExpectedFailure("soy_$oy", INVALID_FORMAT_ERROR_MESSAGE);
   }
 
-  @Test
-  void testSaveToCloudSql_succeeds() throws Exception {
-    runCommandForced("--name=xn--q9jyb4c_common-reserved", "--input=" + reservedTermsPath);
-    verifyXnq9jyb4cInDatastore();
-    verifyXnq9jyb4cInCloudSql();
-  }
-
   private void runNameTestExpectedFailure(String name, String expectedErrorMsg) {
     IllegalArgumentException thrown =
         assertThrows(

@@ -454,7 +454,7 @@ public final class DomainLockUtilsTest {
             () -> domainLockUtils.verifyAndApplyLock(verificationCode, false));
     assertThat(thrown).hasMessageThat().isEqualTo("Domain example.tld is already locked");
 
-    // Failure during Datastore portion shouldn't affect the SQL object
+    // Failure during the lock acquisition portion shouldn't affect the SQL object
     RegistryLock afterAction = getRegistryLockByVerificationCode(lock.getVerificationCode()).get();
     assertThat(afterAction).isEqualTo(lock);
     assertNoDomainChanges();

@@ -24,11 +24,8 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertThrows;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import google.registry.model.tld.label.ReservedList;
import google.registry.model.tld.label.ReservedList.ReservedListEntry;
import google.registry.model.tld.label.ReservedListDao;
import java.io.File;
import java.nio.file.Paths;
import org.junit.jupiter.api.BeforeEach;
@@ -53,16 +50,6 @@ class UpdateReservedListCommandTest
             .build());
   }
 
-  private void populateInitialReservedListInCloudSql(boolean shouldPublish) {
-    ReservedListDao.save(
-        createCloudSqlReservedList(
-            "xn--q9jyb4c_common-reserved",
-            fakeClock.nowUtc(),
-            shouldPublish,
-            ImmutableMap.of(
-                "helicopter", ReservedListEntry.create("helicopter", FULLY_BLOCKED, ""))));
-  }
-
   @Test
   void testSuccess() throws Exception {
     runSuccessfulUpdateTest("--name=xn--q9jyb4c_common-reserved", "--input=" + reservedTermsPath);
@@ -107,29 +94,10 @@ class UpdateReservedListCommandTest
     IllegalArgumentException thrown =
         assertThrows(
             IllegalArgumentException.class,
-            () ->
-                runCommand("--force", "--name=xn--q9jyb4c_poobah", "--input=" + reservedTermsPath));
+            () -> runCommandForced("--name=xn--q9jyb4c_poobah", "--input=" + reservedTermsPath));
     assertThat(thrown).hasMessageThat().contains(errorMessage);
   }
 
-  @Test
-  void testSaveToCloudSql_succeeds() throws Exception {
-    populateInitialReservedListInCloudSql(true);
-    runCommandForced("--name=xn--q9jyb4c_common-reserved", "--input=" + reservedTermsPath);
-    verifyXnq9jyb4cInDatastore();
-    verifyXnq9jyb4cInCloudSql();
-  }
-
-  @Test
-  void testSaveToCloudSql_succeedsEvenPreviousListNotExist() throws Exception {
-    // Note that, during the dual-write phase, we always save the reserved list to Cloud SQL without
-    // checking if there is a list with same name. This is to backfill the existing list in Cloud
-    // Datastore when we update it.
-    runCommandForced("--name=xn--q9jyb4c_common-reserved", "--input=" + reservedTermsPath);
-    verifyXnq9jyb4cInDatastore();
-    assertThat(ReservedListDao.checkExists("xn--q9jyb4c_common-reserved")).isTrue();
-  }
-
   @Test
   void testSuccess_noChanges() throws Exception {
     File reservedTermsFile = tmpDir.resolve("xn--q9jyb4c_common-reserved.txt").toFile();
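The deleted comment above documented the dual-write behavior used during the migration: every update was written to Cloud SQL unconditionally, with no existence check, so that lists created before dual-write began were backfilled on their next update. A generic sketch of that idea follows; the Store interface and DualWriter class are illustrative only, not Nomulus APIs.

```java
/** Sketch of unconditional dual-write with backfill, assuming two illustrative stores. */
final class DualWriter<T> {

  /** Minimal store abstraction; save() is an upsert. */
  interface Store<V> {
    void save(V value);
  }

  private final Store<T> primaryStore; // the pre-migration primary store
  private final Store<T> sqlStore;     // Cloud SQL

  DualWriter(Store<T> primaryStore, Store<T> sqlStore) {
    this.primaryStore = primaryStore;
    this.sqlStore = sqlStore;
  }

  void save(T value) {
    primaryStore.save(value);
    // Always write to SQL, without checking whether the row already exists, so that
    // entities created before dual-write began are backfilled on their next update.
    sqlStore.save(value);
  }
}
```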
@@ -254,13 +254,13 @@ class RegistrarSettingsActionTest extends RegistrarSettingsActionTestCase {
     // Set the user to only have the current role for this registrar
     action.registrarAccessor =
         AuthenticatedRegistrarAccessor.createForTesting(ImmutableSetMultimap.of(CLIENT_ID, role));
-    // Load the registrar as it is currently in datastore, and make sure the requested update will
-    // actually change it
+    // Load the registrar as it is currently in the database, and make sure the requested update
+    // will actually change it
     Registrar registrar = loadRegistrar(CLIENT_ID);
     assertThat(getter.apply(registrar)).isNotEqualTo(newValue);
 
     // Call the action to perform the requested update, then load the "updated" registrar and
-    // return the "datastore" registrar to its original state (for the next iteration)
+    // return the "database" registrar to its original state (for the next iteration)
     Map<String, Object> response =
         action.handleJsonRequest(
             ImmutableMap.of(
@@ -309,8 +309,8 @@ class RegistrarSettingsActionTest extends RegistrarSettingsActionTestCase {
             .build();
     action.registrarAccessor =
         AuthenticatedRegistrarAccessor.createForTesting(accessMap);
-    // Load the registrar as it is currently in datastore, and make sure the requested update will
-    // actually change it
+    // Load the registrar as it is currently in the database, and make sure the requested update
+    // will actually change it
     Registrar registrar = loadRegistrar(CLIENT_ID);
     assertThat(getter.apply(registrar)).isNotEqualTo(newValue);

@@ -212,8 +212,8 @@ final class RegistryLockVerifyActionTest {
 
   @Test
   void testFailure_doesNotChangeLockObject() {
-    // A failure when performing Datastore actions means that no actions should be taken in the
-    // Cloud SQL RegistryLock object
+    // A failure when performing actions means that no actions should be taken in the Cloud SQL
+    // RegistryLock object
     RegistryLock lock = createLock();
     saveRegistryLock(lock);
     // reload the lock to pick up creation time
@@ -221,7 +221,7 @@ final class RegistryLockVerifyActionTest {
     fakeClock.advanceOneMilli();
     domain = persistResource(domain.asBuilder().setStatusValues(REGISTRY_LOCK_STATUSES).build());
     action.run();
-    // we would have failed during the Datastore segment of the action
+    // we would have failed during the lock acquisition segment of the action
     assertThat(response.getPayload()).contains("Failed: Domain example.tld is already locked");
 
     // verify that the changes to the SQL object were rolled back

@@ -284,7 +284,7 @@ public class WhoisActionTest {
   }
 
   @Test
-  void testRun_domainFlaggedAsDeletedInDatastore_isConsideredNotFound() {
+  void testRun_domainFlaggedAsDeletedInDatabase_isConsideredNotFound() {
     Registrar registrar;
     persistResource(
         makeDomain(
@@ -432,7 +432,7 @@ public class WhoisActionTest {
   }
 
   @Test
-  void testRun_nameserverFlaggedAsDeletedInDatastore_doesntGetLeaked() {
+  void testRun_nameserverFlaggedAsDeletedInDatabase_doesntGetLeaked() {
     persistResource(
         FullFieldsTestEntityHelper.makeHost("ns1.cat.lol", "1.2.3.4")
             .asBuilder()

@@ -165,7 +165,7 @@ class WhoisHttpActionTest {
   }
 
   @Test
-  void testRun_wickedLineFeedForgeryInDatastore_crlfSubstitutedWithSpace() {
+  void testRun_wickedLineFeedForgeryInDatabase_crlfSubstitutedWithSpace() {
     Contact trl =
         FullFieldsTestEntityHelper.makeContact("5372808-TRL", "Eric Schmidt", "bog@cat.みんな");
     trl =

Binary file not shown.
@ -1,100 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- This query gathers all non-canceled billing events for a given
|
||||
-- YEAR_MONTH in yyyy-MM format.
|
||||
|
||||
SELECT
|
||||
__key__.id AS id,
|
||||
billingTime,
|
||||
eventTime,
|
||||
BillingEvent.clientId AS registrarId,
|
||||
RegistrarData.accountId AS billingId,
|
||||
RegistrarData.poNumber AS poNumber,
|
||||
tld,
|
||||
reason as action,
|
||||
targetId as domain,
|
||||
BillingEvent.domainRepoId as repositoryId,
|
||||
IFNULL(periodYears, 0) as years,
|
||||
BillingEvent.currency AS currency,
|
||||
BillingEvent.amount as amount,
|
||||
-- We'll strip out non-useful flags downstream
|
||||
ARRAY_TO_STRING(flags, " ") AS flags
|
||||
FROM (
|
||||
SELECT
|
||||
*,
|
||||
-- We store cost as "CURRENCY AMOUNT" such as "JPY 800" or "USD 20.00"
|
||||
SPLIT(cost, ' ')[OFFSET(0)] AS currency,
|
||||
SPLIT(cost, ' ')[OFFSET(1)] AS amount,
|
||||
-- Extract everything after the first dot in the domain as the TLD
|
||||
REGEXP_EXTRACT(targetId, r'[.](.+)') AS tld,
|
||||
REGEXP_REPLACE(SPLIT(__key__.path, ', ')[OFFSET(1)], '"', '')
|
||||
AS domainRepoId,
|
||||
COALESCE(cancellationMatchingBillingEvent.path,
|
||||
__key__.path) AS cancellationMatchingPath
|
||||
FROM
|
||||
`my-project-id.latest_datastore_export.OneTime`
|
||||
-- Only include real TLDs (filter prober data)
|
||||
WHERE
|
||||
REGEXP_EXTRACT(targetId, r'[.](.+)') IN (
|
||||
SELECT
|
||||
tldStr
|
||||
FROM
|
||||
`my-project-id.latest_datastore_export.Registry`
|
||||
WHERE
|
||||
invoicingEnabled IS TRUE) ) AS BillingEvent
|
||||
-- Gather billing ID from registrar table
|
||||
-- This is a 'JOIN' as opposed to 'LEFT JOIN' to filter out
|
||||
-- non-billable registrars
|
||||
JOIN (
|
||||
SELECT
|
||||
__key__.name AS clientId,
|
||||
billingIdentifier,
|
||||
IFNULL(poNumber, '') AS poNumber,
|
||||
r.billingAccountMap.currency[SAFE_OFFSET(index)] AS currency,
|
||||
r.billingAccountMap.accountId[SAFE_OFFSET(index)] AS accountId
|
||||
FROM
|
||||
`my-project-id.latest_datastore_export.Registrar` AS r,
|
||||
UNNEST(GENERATE_ARRAY(0, ARRAY_LENGTH(r.billingAccountMap.currency) - 1))
|
||||
AS index
|
||||
WHERE billingAccountMap IS NOT NULL
|
||||
AND type = 'REAL') AS RegistrarData
|
||||
ON
|
||||
BillingEvent.clientId = RegistrarData.clientId
|
||||
AND BillingEvent.currency = RegistrarData.currency
|
||||
-- Gather cancellations
|
||||
LEFT JOIN (
|
||||
SELECT __key__.id AS cancellationId,
|
||||
COALESCE(refOneTime.path, refRecurring.path) AS cancelledEventPath,
|
||||
eventTime as cancellationTime,
|
||||
billingTime as cancellationBillingTime
|
||||
FROM
|
||||
(SELECT
|
||||
*,
|
||||
-- Count everything after first dot as TLD (to support multi-part TLDs).
|
||||
REGEXP_EXTRACT(targetId, r'[.](.+)') AS tld
|
||||
FROM
|
||||
`my-project-id.latest_datastore_export.Cancellation`)
|
||||
) AS Cancellation
|
||||
ON BillingEvent.cancellationMatchingPath = Cancellation.cancelledEventPath
|
||||
AND BillingEvent.billingTime = Cancellation.cancellationBillingTime
|
||||
WHERE billingTime BETWEEN TIMESTAMP('2017-10-01 00:00:00.000000')
|
||||
AND TIMESTAMP('2017-10-31 23:59:59.999999')
|
||||
-- Filter out canceled events
|
||||
AND Cancellation.cancellationId IS NULL
|
||||
ORDER BY
|
||||
billingTime DESC,
|
||||
id,
|
||||
tld
|
File diff suppressed because it is too large.
Binary image file not shown (before: 1.2 MiB).
@ -1,17 +0,0 @@
|
|||
AllocationToken
|
||||
Cancellation
|
||||
Contact
|
||||
Domain
|
||||
EntityGroupRoot
|
||||
EppResourceIndex
|
||||
ForeignKeyContactIndex
|
||||
ForeignKeyDomainIndex
|
||||
ForeignKeyHostIndex
|
||||
HistoryEntry
|
||||
Host
|
||||
Modification
|
||||
OneTime
|
||||
PollMessage
|
||||
RdeRevision
|
||||
Recurring
|
||||
Registrar
|
|
@ -1,18 +0,0 @@
|
|||
{
|
||||
"name": "projects/registry-project-id/operations/ASAzNjMwOTEyNjUJ",
|
||||
"metadata": {
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2014-08-01T01:02:03Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "PROCESSING"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"one",
|
||||
"two"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://registry-project-id-datastore-export-test/2014-08-01T01:02:03_99364"
|
||||
}
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
{
|
||||
"name": "projects/registry-project-id/operations/ASAzNjMwOTEyNjUJ",
|
||||
"metadata": {
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2014-08-01T01:02:03Z",
|
||||
"endTime": "2014-08-01T01:32:03Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "SUCCESSFUL"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"one",
|
||||
"two"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://registry-project-id-datastore-export-test/2014-08-01T01:02:03_99364"
|
||||
},
|
||||
"done": true
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
Registrar
|
||||
ServerSecret
|
|
@ -1,5 +0,0 @@
|
|||
{
|
||||
"startTime": "2018-10-29T16:01:04.645299Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "SUCCESSFUL"
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
{
|
||||
"kinds": [
|
||||
"Registry",
|
||||
"Registrar",
|
||||
"Domain"
|
||||
]
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
{
|
||||
"entityFilter": {
|
||||
"kinds": ["Registry", "Registrar", "Domain"]
|
||||
},
|
||||
"outputUrlPrefix": "gs://mybucket/path"
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
{
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2018-10-29T16:01:04.645299Z",
|
||||
"endTime": "2018-10-29T16:02:19.009859Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "SUCCESSFUL"
|
||||
},
|
||||
"progressEntities": {
|
||||
"workCompleted": "51797",
|
||||
"workEstimated": "54513"
|
||||
},
|
||||
"progressBytes": {
|
||||
"workCompleted": "96908367",
|
||||
"workEstimated": "73773755"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"Registry",
|
||||
"Registrar",
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://domain-registry-alpha-datastore-export-test/2018-10-29T16:01:04_99364"
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
{
|
||||
"name": "projects/domain-registry-alpha/operations/ASAzNjMwOTEyNjUJ",
|
||||
"metadata": {
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2018-10-29T16:01:04.645299Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "PROCESSING"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"Registry",
|
||||
"Registrar",
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://domain-registry-alpha-datastore-export-test/2018-10-29T16:01:04_99364"
|
||||
}
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
{
|
||||
"operations": [
|
||||
{
|
||||
"name": "projects/domain-registry-alpha/operations/ASAzNjMwOTEyNjUJ",
|
||||
"metadata": {
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2018-10-29T16:01:04.645299Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "PROCESSING"
|
||||
},
|
||||
"progressEntities": {
|
||||
"workCompleted": "51797",
|
||||
"workEstimated": "54513"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"Registry",
|
||||
"Registrar",
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://domain-registry-alpha-datastore-export-test/2018-10-29T16:01:04_99364"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "projects/domain-registry-alpha/operations/ASAzNjMwOTEyNjUJ",
|
||||
"metadata": {
|
||||
"@type": "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata",
|
||||
"common": {
|
||||
"startTime": "2018-10-29T16:01:04.645299Z",
|
||||
"endTime": "2018-10-29T16:02:04.645299Z",
|
||||
"operationType": "EXPORT_ENTITIES",
|
||||
"state": "PROCESSING"
|
||||
},
|
||||
"progressEntities": {
|
||||
"workCompleted": "51797",
|
||||
"workEstimated": "54513"
|
||||
},
|
||||
"progressBytes": {
|
||||
"workCompleted": "96908367",
|
||||
"workEstimated": "73773755"
|
||||
},
|
||||
"entityFilter": {
|
||||
"kinds": [
|
||||
"Registry",
|
||||
"Registrar",
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
"outputUrlPrefix": "gs://domain-registry-alpha-datastore-export-test/2018-10-29T16:01:04_99364"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
{
|
||||
"done" : false,
|
||||
"metadata" : {
|
||||
"common" : {
|
||||
"operationType" : "EXPORT_ENTITIES",
|
||||
"startTime" : "2018-10-29T16:01:04.645299Z",
|
||||
"state" : "PROCESSING"
|
||||
},
|
||||
"entityFilter" : {
|
||||
"kinds" : [ "Registry", "Registrar", "Domain" ]
|
||||
},
|
||||
"outputUrlPrefix" : "gs://domain-registry-alpha-datastore-export-test/2018-10-29T16:01:04_99364",
|
||||
"@type" : "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata"
|
||||
},
|
||||
"name" : "projects/domain-registry-alpha/operations/ASAzNjMwOTEyNjUJ"
|
||||
}
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"workCompleted": "51797",
|
||||
"workEstimated": "54513"
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"done" : true,
|
||||
"metadata" : {
|
||||
"common" : {
|
||||
"endTime" : "2014-08-01T01:32:03Z",
|
||||
"operationType" : "EXPORT_ENTITIES",
|
||||
"startTime" : "2014-08-01T01:02:03Z",
|
||||
"state" : "SUCCESSFUL"
|
||||
},
|
||||
"entityFilter" : {
|
||||
"kinds" : [ "one", "two" ]
|
||||
},
|
||||
"outputUrlPrefix" : "gs://registry-project-id-datastore-export-test/2014-08-01T01:02:03_99364",
|
||||
"@type" : "type.googleapis.com/google.datastore.admin.v1.ExportEntitiesMetadata"
|
||||
},
|
||||
"name" : "projects/registry-project-id/operations/ASAzNjMwOTEyNjUJ"
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
AllocationToken
|
||||
Cancellation
|
||||
Contact
|
||||
Domain
|
||||
EppResourceIndex
|
||||
ForeignKeyContactIndex
|
||||
ForeignKeyDomainIndex
|
||||
ForeignKeyHostIndex
|
||||
HistoryEntry
|
||||
Host
|
||||
Modification
|
||||
OneTime
|
||||
PollMessage
|
||||
Recurring
|
||||
Registrar
|
|
@ -1,97 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- This query pulls from all intermediary tables to create the activity
|
||||
-- report csv, via a table transpose and sum over all activity report fields.
|
||||
|
||||
SELECT
|
||||
RealTlds.tld AS tld,
|
||||
SUM(IF(metricName = 'operational-registrars', count, 0)) AS operational_registrars,
|
||||
-- We use the Centralized Zone Data Service.
|
||||
"CZDS" AS zfa_passwords,
|
||||
SUM(IF(metricName = 'whois-43-queries', count, 0)) AS whois_43_queries,
|
||||
SUM(IF(metricName = 'web-whois-queries', count, 0)) AS web_whois_queries,
|
||||
-- We don't support searchable WHOIS.
|
||||
0 AS searchable_whois_queries,
|
||||
-- DNS queries for UDP/TCP are all assumed to be received/responded.
|
||||
SUM(IF(metricName = 'dns-udp-queries', count, 0)) AS dns_udp_queries_received,
|
||||
SUM(IF(metricName = 'dns-udp-queries', count, 0)) AS dns_udp_queries_responded,
|
||||
SUM(IF(metricName = 'dns-tcp-queries', count, 0)) AS dns_tcp_queries_received,
|
||||
SUM(IF(metricName = 'dns-tcp-queries', count, 0)) AS dns_tcp_queries_responded,
|
||||
-- SRS metrics.
|
||||
SUM(IF(metricName = 'srs-dom-check', count, 0)) AS srs_dom_check,
|
||||
SUM(IF(metricName = 'srs-dom-create', count, 0)) AS srs_dom_create,
|
||||
SUM(IF(metricName = 'srs-dom-delete', count, 0)) AS srs_dom_delete,
|
||||
SUM(IF(metricName = 'srs-dom-info', count, 0)) AS srs_dom_info,
|
||||
SUM(IF(metricName = 'srs-dom-renew', count, 0)) AS srs_dom_renew,
|
||||
SUM(IF(metricName = 'srs-dom-rgp-restore-report', count, 0)) AS srs_dom_rgp_restore_report,
|
||||
SUM(IF(metricName = 'srs-dom-rgp-restore-request', count, 0)) AS srs_dom_rgp_restore_request,
|
||||
SUM(IF(metricName = 'srs-dom-transfer-approve', count, 0)) AS srs_dom_transfer_approve,
|
||||
SUM(IF(metricName = 'srs-dom-transfer-cancel', count, 0)) AS srs_dom_transfer_cancel,
|
||||
SUM(IF(metricName = 'srs-dom-transfer-query', count, 0)) AS srs_dom_transfer_query,
|
||||
SUM(IF(metricName = 'srs-dom-transfer-reject', count, 0)) AS srs_dom_transfer_reject,
|
||||
SUM(IF(metricName = 'srs-dom-transfer-request', count, 0)) AS srs_dom_transfer_request,
|
||||
SUM(IF(metricName = 'srs-dom-update', count, 0)) AS srs_dom_update,
|
||||
SUM(IF(metricName = 'srs-host-check', count, 0)) AS srs_host_check,
|
||||
SUM(IF(metricName = 'srs-host-create', count, 0)) AS srs_host_create,
|
||||
SUM(IF(metricName = 'srs-host-delete', count, 0)) AS srs_host_delete,
|
||||
SUM(IF(metricName = 'srs-host-info', count, 0)) AS srs_host_info,
|
||||
SUM(IF(metricName = 'srs-host-update', count, 0)) AS srs_host_update,
|
||||
SUM(IF(metricName = 'srs-cont-check', count, 0)) AS srs_cont_check,
|
||||
SUM(IF(metricName = 'srs-cont-create', count, 0)) AS srs_cont_create,
|
||||
SUM(IF(metricName = 'srs-cont-delete', count, 0)) AS srs_cont_delete,
|
||||
SUM(IF(metricName = 'srs-cont-info', count, 0)) AS srs_cont_info,
|
||||
SUM(IF(metricName = 'srs-cont-transfer-approve', count, 0)) AS srs_cont_transfer_approve,
|
||||
SUM(IF(metricName = 'srs-cont-transfer-cancel', count, 0)) AS srs_cont_transfer_cancel,
|
||||
SUM(IF(metricName = 'srs-cont-transfer-query', count, 0)) AS srs_cont_transfer_query,
|
||||
SUM(IF(metricName = 'srs-cont-transfer-reject', count, 0)) AS srs_cont_transfer_reject,
|
||||
SUM(IF(metricName = 'srs-cont-transfer-request', count, 0)) AS srs_cont_transfer_request,
|
||||
SUM(IF(metricName = 'srs-cont-update', count, 0)) AS srs_cont_update
|
||||
-- Cross join a list of all TLDs against TLD-specific metrics and then
|
||||
-- filter so that only metrics with that TLD or a NULL TLD are counted
|
||||
-- towards a given TLD.
|
||||
FROM (
|
||||
SELECT tldStr AS tld
|
||||
FROM `domain-registry-alpha.latest_datastore_export.Registry`
|
||||
WHERE tldType = 'REAL'
|
||||
) as RealTlds
|
||||
CROSS JOIN(
|
||||
SELECT
|
||||
tld,
|
||||
metricName,
|
||||
count
|
||||
FROM
|
||||
(
|
||||
-- BEGIN INTERMEDIARY DATA SOURCES --
|
||||
-- Dummy data source to ensure all TLDs appear in report, even if
|
||||
-- they have no recorded metrics for the month.
|
||||
SELECT STRING(NULL) AS tld, STRING(NULL) AS metricName, 0 as count
|
||||
UNION ALL
|
||||
SELECT * FROM
|
||||
`domain-registry-alpha.icann_reporting.registrar_operating_status_201709`
|
||||
UNION ALL
|
||||
SELECT * FROM
|
||||
`domain-registry-alpha.icann_reporting.dns_counts_201709`
|
||||
UNION ALL
|
||||
SELECT * FROM
|
||||
`domain-registry-alpha.icann_reporting.epp_metrics_201709`
|
||||
UNION ALL
|
||||
SELECT * FROM
|
||||
`domain-registry-alpha.icann_reporting.whois_counts_201709`
|
||||
-- END INTERMEDIARY DATA SOURCES --
|
||||
)) AS TldMetrics
|
||||
WHERE RealTlds.tld = TldMetrics.tld OR TldMetrics.tld IS NULL
|
||||
GROUP BY tld
|
||||
ORDER BY tld
|
|
@ -1,29 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Query for DNS metrics.
|
||||
|
||||
-- You must configure this yourself to enable activity reporting, according
|
||||
-- to whatever metrics your DNS provider makes available. We hope to make
|
||||
-- this available in the open-source build in the near future.
|
||||
|
||||
SELECT
|
||||
STRING(NULL) AS tld,
|
||||
metricName,
|
||||
-1 AS count
|
||||
FROM ((
|
||||
SELECT 'dns-udp-queries' AS metricName)
|
||||
UNION ALL
|
||||
(SELECT 'dns-tcp-queries' AS metricName))
|
|
@ -1,58 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Query FlowReporter JSON log messages and calculate SRS metrics.
|
||||
|
||||
-- We use ugly regex's over the monthly appengine logs to determine how many
|
||||
-- EPP requests we received for each command. For example:
|
||||
-- {"commandType":"check"...,"targetIds":["ais.a.how"],
|
||||
-- "tld":"","tlds":["a.how"],"icannActivityReportField":"srs-dom-check"}
|
||||
|
||||
SELECT
|
||||
-- Remove quotation marks from tld fields.
|
||||
REGEXP_EXTRACT(tld, '^"(.*)"$') AS tld,
|
||||
activityReportField AS metricName,
|
||||
COUNT(*) AS count
|
||||
FROM (
|
||||
SELECT
|
||||
-- TODO(b/32486667): Replace with JSON.parse() UDF when available for views
|
||||
SPLIT(
|
||||
REGEXP_EXTRACT(JSON_EXTRACT(json, '$.tlds'), r'^\[(.*)\]$')) AS tlds,
|
||||
JSON_EXTRACT_SCALAR(json,
|
||||
'$.resourceType') AS resourceType,
|
||||
JSON_EXTRACT_SCALAR(json,
|
||||
'$.icannActivityReportField') AS activityReportField
|
||||
FROM (
|
||||
SELECT
|
||||
-- Extract the logged JSON payload.
|
||||
REGEXP_EXTRACT(logMessage, r'FLOW-LOG-SIGNATURE-METADATA: (.*)\n?$')
|
||||
AS json
|
||||
FROM `domain-registry-alpha.icann_reporting.monthly_logs_201709` AS logs
|
||||
JOIN
|
||||
UNNEST(logs.logMessage) AS logMessage
|
||||
WHERE
|
||||
STARTS_WITH(logMessage, "google.registry.flows.FlowReporter recordToLogs: FLOW-LOG-SIGNATURE-METADATA"))) AS regexes
|
||||
JOIN
|
||||
-- Unnest the JSON-parsed tlds.
|
||||
UNNEST(regexes.tlds) AS tld
|
||||
-- Exclude cases that can't be tabulated correctly, where activityReportField
|
||||
-- is null/empty, or TLD is null/empty despite being a domain flow.
|
||||
WHERE
|
||||
activityReportField != ''
|
||||
AND (tld != '' OR resourceType != 'domain')
|
||||
GROUP BY
|
||||
tld, metricName
|
||||
ORDER BY
|
||||
tld, metricName
|
|
@ -1,30 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Query to fetch AppEngine request logs for the report month.
|
||||
|
||||
-- START_OF_MONTH and END_OF_MONTH should be in YYYYMM01 format.
|
||||
|
||||
SELECT
|
||||
protoPayload.resource AS requestPath,
|
||||
ARRAY(
|
||||
SELECT
|
||||
logMessage
|
||||
FROM
|
||||
UNNEST(protoPayload.line)) AS logMessage
|
||||
FROM
|
||||
`domain-registry-alpha.appengine_logs.appengine_googleapis_com_request_log_*`
|
||||
WHERE
|
||||
_TABLE_SUFFIX BETWEEN '20170901' AND '20170930'
|
|
@ -1,85 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Counts the number of mutating transactions each registrar made.
|
||||
|
||||
-- We populate the fields through explicit logging of
|
||||
-- DomainTransactionRecords, which contain all necessary information for
|
||||
-- reporting (such as reporting time, report field, report amount, etc.
|
||||
|
||||
-- A special note on transfers: we only record 'TRANSFER_SUCCESSFUL' or
|
||||
-- 'TRANSFER_NACKED', and we can infer the gaining and losing parties
|
||||
-- from the enclosing HistoryEntry's clientId and otherClientId
|
||||
-- respectively. This query templates the client ID, field for transfer
|
||||
-- success, field for transfer nacks and default field. This allows us to
|
||||
-- create one query for TRANSFER_GAINING and the other report fields,
|
||||
-- and one query for TRANSFER_LOSING fields from the same template.
|
||||
|
||||
-- This outer select just converts the registrar's clientId to their name.
|
||||
SELECT
|
||||
tld,
|
||||
registrar_table.registrarName AS registrar_name,
|
||||
metricName,
|
||||
metricValue
|
||||
FROM (
|
||||
SELECT
|
||||
tld,
|
||||
clientId,
|
||||
CASE
|
||||
WHEN field = 'TRANSFER_SUCCESSFUL' THEN 'TRANSFER_LOSING_SUCCESSFUL'
|
||||
WHEN field = 'TRANSFER_NACKED' THEN 'TRANSFER_LOSING_NACKED'
|
||||
ELSE NULL
|
||||
END AS metricName,
|
||||
SUM(amount) AS metricValue
|
||||
FROM (
|
||||
SELECT
|
||||
CASE
|
||||
-- Explicit transfer acks (approve) and nacks (reject) are done
|
||||
-- by the opposing registrar. Thus, for these specific actions,
|
||||
-- we swap the 'otherClientId' with the 'clientId' to properly
|
||||
-- account for this reversal.
|
||||
WHEN (entries.type = 'DOMAIN_TRANSFER_APPROVE'
|
||||
OR entries.type = 'DOMAIN_TRANSFER_REJECT')
|
||||
THEN entries.clientId
|
||||
ELSE entries.otherClientId
|
||||
END AS clientId,
|
||||
entries.domainTransactionRecords.tld[SAFE_OFFSET(index)] AS tld,
|
||||
entries.domainTransactionRecords.reportingTime[SAFE_OFFSET(index)]
|
||||
AS reportingTime,
|
||||
entries.domainTransactionRecords.reportField[SAFE_OFFSET(index)]
|
||||
AS field,
|
||||
entries.domainTransactionRecords.reportAmount[SAFE_OFFSET(index)]
|
||||
AS amount
|
||||
FROM
|
||||
`domain-registry-alpha.latest_datastore_export.HistoryEntry`
|
||||
AS entries,
|
||||
-- This allows us to 'loop' through the arrays in parallel by index
|
||||
UNNEST(GENERATE_ARRAY(0, ARRAY_LENGTH(
|
||||
entries.domainTransactionRecords.tld) - 1)) AS index
|
||||
-- Ignore null entries
|
||||
WHERE entries.domainTransactionRecords IS NOT NULL )
|
||||
-- Only look at this month's data
|
||||
WHERE reportingTime
|
||||
BETWEEN TIMESTAMP('2017-09-01 00:00:00.000')
|
||||
AND TIMESTAMP('2017-09-30 23:59:59.999')
|
||||
GROUP BY
|
||||
tld,
|
||||
clientId,
|
||||
field ) AS counts_table
|
||||
JOIN
|
||||
`domain-registry-alpha.latest_datastore_export.Registrar`
|
||||
AS registrar_table
|
||||
ON
|
||||
counts_table.clientId = registrar_table.__key__.name
|
|
@ -1,33 +0,0 @@
|
|||
#standardSQL
|
||||
-- Copyright 2017 The Nomulus Authors. All Rights Reserved.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Query for WHOIS metrics.
|
||||
|
||||
-- This searches the monthly appengine logs for Whois requests, and
|
||||
-- counts the number of hits via both endpoints (port 43 and the web).
|
||||
|
||||
SELECT
|
||||
STRING(NULL) AS tld,
|
||||
CASE
|
||||
WHEN requestPath = '/_dr/whois' THEN 'whois-43-queries'
|
||||
WHEN SUBSTR(requestPath, 0, 7) = '/whois/' THEN 'web-whois-queries'
|
||||
END AS metricName,
|
||||
COUNT(requestPath) AS count
|
||||
FROM
|
||||
`domain-registry-alpha.icann_reporting.monthly_logs_201709`
|
||||
GROUP BY
|
||||
metricName
|
||||
HAVING
|
||||
metricName IS NOT NULL
|
|
@@ -3,7 +3,6 @@ handlers = java.util.logging.ConsoleHandler
 
 google.registry.level = FINE
 
-com.google.appengine.api.datastore.dev.LocalDatastoreService.level = WARNING
 com.google.appengine.api.taskqueue.dev.level = WARNING
 com.google.apphosting.utils.config.level = WARNING
 org.quartz.level = WARNING
Binary files not shown.