mirror of
https://github.com/google/nomulus.git
synced 2025-05-17 01:47:14 +02:00
Consolidate BigQuery handling into one place
I'm writing a follow-up CL that will send integrity checking data to BigQuery, and that is made a lot easier by centralizing the BigQuery connection logic. ------------- Created by MOE: https://github.com/google/moe MOE_MIGRATED_REVID=119375766
This commit is contained in:
parent
c880a042a7
commit
755fce9e52
12 changed files with 329 additions and 347 deletions
|
@ -16,6 +16,7 @@ java_library(
|
|||
"//java/com/google/api/client/http",
|
||||
"//java/com/google/api/client/json",
|
||||
"//java/com/google/api/client/json/jackson2",
|
||||
"//java/com/google/common/annotations",
|
||||
"//java/com/google/common/base",
|
||||
"//java/com/google/common/collect",
|
||||
"//java/com/google/common/io",
|
||||
|
|
|
@ -14,20 +14,154 @@
|
|||
|
||||
package com.google.domain.registry.bigquery;
|
||||
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.domain.registry.bigquery.BigquerySchemas.knownTableSchemas;
|
||||
|
||||
import com.google.api.client.extensions.appengine.http.UrlFetchTransport;
|
||||
import com.google.api.client.googleapis.extensions.appengine.auth.oauth2.AppIdentityCredential;
|
||||
import com.google.api.client.http.HttpRequestInitializer;
|
||||
import com.google.api.client.http.HttpTransport;
|
||||
import com.google.api.client.json.JsonFactory;
|
||||
import com.google.api.client.json.jackson2.JacksonFactory;
|
||||
import com.google.api.services.bigquery.Bigquery;
|
||||
import com.google.api.services.bigquery.BigqueryScopes;
|
||||
import com.google.api.services.bigquery.model.Dataset;
|
||||
import com.google.api.services.bigquery.model.DatasetReference;
|
||||
import com.google.api.services.bigquery.model.Table;
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.api.services.bigquery.model.TableReference;
|
||||
import com.google.api.services.bigquery.model.TableSchema;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.google.domain.registry.util.FormattingLogger;
|
||||
import com.google.domain.registry.util.NonFinalForTesting;
|
||||
|
||||
/** Factory for returning {@link Bigquery} instances. */
|
||||
import java.io.IOException;
|
||||
import java.util.Set;
|
||||
|
||||
/** Factory for creating {@link Bigquery} connections. */
|
||||
public class BigqueryFactory {
|
||||
|
||||
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
|
||||
|
||||
// Cross-request caches to avoid unnecessary RPCs.
|
||||
@NonFinalForTesting
|
||||
private Set<String> knownTables = Sets.newConcurrentHashSet();
|
||||
|
||||
@NonFinalForTesting
|
||||
private Set<String> datasets = Sets.newConcurrentHashSet();
|
||||
|
||||
@NonFinalForTesting
|
||||
@VisibleForTesting
|
||||
Subfactory subfactory = new Subfactory();
|
||||
|
||||
/** This class is broken out solely so that it can be mocked inside of tests. */
|
||||
static class Subfactory {
|
||||
|
||||
public Bigquery create(
|
||||
String applicationName,
|
||||
HttpTransport transport,
|
||||
JsonFactory jsonFactory,
|
||||
HttpRequestInitializer httpRequestInitializer) {
|
||||
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
|
||||
.setApplicationName(applicationName)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns a new connection to BigQuery. */
|
||||
public Bigquery create(
|
||||
String applicationName,
|
||||
HttpTransport transport,
|
||||
JsonFactory jsonFactory,
|
||||
HttpRequestInitializer httpRequestInitializer) {
|
||||
return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
|
||||
.setApplicationName(applicationName)
|
||||
.build();
|
||||
return subfactory.create(applicationName, transport, jsonFactory, httpRequestInitializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new connection to Bigquery, first ensuring that the given dataset exists in the
|
||||
* project with the given id, creating it if required.
|
||||
*/
|
||||
public Bigquery create(String projectId, String datasetId) throws IOException {
|
||||
Bigquery bigquery = create(
|
||||
getClass().getSimpleName(),
|
||||
new UrlFetchTransport(),
|
||||
new JacksonFactory(),
|
||||
new AppIdentityCredential(BigqueryScopes.all()));
|
||||
|
||||
// Note: it's safe for multiple threads to call this as the dataset will only be created once.
|
||||
if (!datasets.contains(datasetId)) {
|
||||
ensureDataset(bigquery, projectId, datasetId);
|
||||
datasets.add(datasetId);
|
||||
}
|
||||
|
||||
return bigquery;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new connection to Bigquery, first ensuring that the given dataset and table exist in
|
||||
* project with the given id, creating them if required.
|
||||
*/
|
||||
public Bigquery create(String projectId, String datasetId, String tableId)
|
||||
throws IOException {
|
||||
Bigquery bigquery = create(projectId, datasetId);
|
||||
checkArgument(knownTableSchemas.containsKey(tableId), "Unknown table ID: %s", tableId);
|
||||
|
||||
if (!knownTables.contains(tableId)) {
|
||||
ensureTable(
|
||||
bigquery,
|
||||
new TableReference()
|
||||
.setDatasetId(datasetId)
|
||||
.setProjectId(projectId)
|
||||
.setTableId(tableId),
|
||||
knownTableSchemas.get(tableId));
|
||||
knownTables.add(tableId);
|
||||
}
|
||||
|
||||
return bigquery;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper
|
||||
* to check for dataset existence than it is to try to create it and check for exceptions.
|
||||
*/
|
||||
// Note that these are not static so they can be mocked for testing.
|
||||
private void ensureDataset(Bigquery bigquery, String projectId, String datasetId)
|
||||
throws IOException {
|
||||
try {
|
||||
bigquery.datasets()
|
||||
.insert(projectId,
|
||||
new Dataset().setDatasetReference(
|
||||
new DatasetReference()
|
||||
.setProjectId(projectId)
|
||||
.setDatasetId(datasetId)))
|
||||
.execute();
|
||||
} catch (IOException e) {
|
||||
// Swallow errors about a duplicate dataset, and throw any other ones.
|
||||
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Ensures the table exists in Bigquery. */
|
||||
private void ensureTable(
|
||||
Bigquery bigquery, TableReference table, ImmutableList<TableFieldSchema> schema)
|
||||
throws IOException {
|
||||
try {
|
||||
bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table()
|
||||
.setSchema(new TableSchema().setFields(schema))
|
||||
.setTableReference(table))
|
||||
.execute();
|
||||
logger.infofmt("Created BigQuery table %s:%s.%s", table.getProjectId(), table.getDatasetId(),
|
||||
table.getTableId());
|
||||
} catch (IOException e) {
|
||||
// Swallow errors about a table that exists, and throw any other ones.
|
||||
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,75 +0,0 @@
|
|||
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package com.google.domain.registry.bigquery;
|
||||
|
||||
|
||||
import com.google.api.services.bigquery.Bigquery;
|
||||
import com.google.api.services.bigquery.model.Dataset;
|
||||
import com.google.api.services.bigquery.model.DatasetReference;
|
||||
import com.google.api.services.bigquery.model.Table;
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.api.services.bigquery.model.TableReference;
|
||||
import com.google.api.services.bigquery.model.TableSchema;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.domain.registry.util.FormattingLogger;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/** Helpers for Bigquery. */
|
||||
public class BigqueryHelper {
|
||||
|
||||
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
|
||||
|
||||
/**
|
||||
* Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper
|
||||
* to check for dataset existence than it is to try to create it and check for exceptions.
|
||||
*/
|
||||
// Note that these are not static so they can be mocked for testing.
|
||||
public void ensureDataset(Bigquery bigquery, String projectId, String datasetId)
|
||||
throws IOException {
|
||||
try {
|
||||
bigquery.datasets()
|
||||
.insert(projectId,
|
||||
new Dataset().setDatasetReference(
|
||||
new DatasetReference()
|
||||
.setProjectId(projectId)
|
||||
.setDatasetId(datasetId)))
|
||||
.execute();
|
||||
} catch (IOException e) {
|
||||
// Swallow errors about a duplicate dataset, and throw any other ones.
|
||||
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Ensures the table exists in Bigquery. */
|
||||
public void ensureTable(Bigquery bigquery, TableReference table,
|
||||
ImmutableList<TableFieldSchema> schema) throws IOException {
|
||||
try {
|
||||
bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table()
|
||||
.setSchema(new TableSchema().setFields(schema))
|
||||
.setTableReference(table))
|
||||
.execute();
|
||||
logger.infofmt("Created BigQuery table %s:%s.%s", table.getProjectId(), table.getDatasetId(),
|
||||
table.getTableId());
|
||||
} catch (IOException e) {
|
||||
// Swallow errors about a table that exists, and throw any other ones.
|
||||
if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package com.google.domain.registry.bigquery;
|
||||
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.domain.registry.bigquery.BigqueryUtils.FieldType;
|
||||
import com.google.domain.registry.util.NonFinalForTesting;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/** Schemas for BigQuery tables. */
|
||||
public final class BigquerySchemas {
|
||||
|
||||
static final ImmutableList<TableFieldSchema> EPPMETRICS_SCHEMA_FIELDS =
|
||||
ImmutableList.<TableFieldSchema>of(
|
||||
new TableFieldSchema().setName("requestId").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("startTime").setType(FieldType.TIMESTAMP.name()),
|
||||
new TableFieldSchema().setName("endTime").setType(FieldType.TIMESTAMP.name()),
|
||||
new TableFieldSchema().setName("commandName").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("clientId").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("privilegeLevel").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("eppTarget").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("eppStatus").setType(FieldType.INTEGER.name()),
|
||||
new TableFieldSchema().setName("attempts").setType(FieldType.INTEGER.name()));
|
||||
|
||||
public static final String EPPMETRICS_TABLE_ID = "eppMetrics";
|
||||
|
||||
@NonFinalForTesting
|
||||
static Map<String, ImmutableList<TableFieldSchema>> knownTableSchemas =
|
||||
ImmutableMap.of(EPPMETRICS_TABLE_ID, EPPMETRICS_SCHEMA_FIELDS);
|
||||
|
||||
private BigquerySchemas() {}
|
||||
}
|
|
@ -1,148 +0,0 @@
|
|||
// Copyright 2016 The Domain Registry Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package com.google.domain.registry.export;
|
||||
|
||||
import static com.google.common.base.CaseFormat.LOWER_UNDERSCORE;
|
||||
import static com.google.common.base.CaseFormat.UPPER_UNDERSCORE;
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.common.base.Function;
|
||||
import com.google.common.collect.FluentIterable;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.google.domain.registry.bigquery.BigqueryUtils.FieldMode;
|
||||
import com.google.domain.registry.bigquery.BigqueryUtils.FieldType;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
/** Helper class which acts as a container for Bigquery table schemas. */
class BigquerySchemas {
  // Static-only container; never instantiated.
  private BigquerySchemas() {}

  /** The fields in the "Logs" table. */
  enum LogsTableField {

    // These fields appear in nested APP_LOG_LINES records.
    LOG_LEVEL(FieldType.STRING, FieldMode.NULLABLE),
    LOG_MESSAGE(FieldType.STRING, FieldMode.NULLABLE),
    TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),

    // These fields appear in records at top level of the table.
    START_TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),
    END_TIME(FieldType.TIMESTAMP, FieldMode.NULLABLE),
    RELEASE(FieldType.STRING, FieldMode.NULLABLE),
    APPID(FieldType.STRING, FieldMode.NULLABLE),
    COST(FieldType.FLOAT, FieldMode.NULLABLE),
    EPP_CLIENT_ID(FieldType.STRING, FieldMode.NULLABLE),
    EPP_COMMAND(FieldType.STRING, FieldMode.NULLABLE),
    EPP_RESULT(FieldType.BOOLEAN, FieldMode.NULLABLE),
    EPP_TARGET(FieldType.STRING, FieldMode.REPEATED),
    EPP_TLD(FieldType.STRING, FieldMode.NULLABLE),
    HOST(FieldType.STRING, FieldMode.NULLABLE),
    HTTP_VERSION(FieldType.STRING, FieldMode.NULLABLE),
    INSTANCE_KEY(FieldType.STRING, FieldMode.NULLABLE),
    IP(FieldType.STRING, FieldMode.NULLABLE),
    LATENCY_USEC(FieldType.INTEGER, FieldMode.NULLABLE),
    MCYCLES(FieldType.INTEGER, FieldMode.NULLABLE),
    METHOD(FieldType.STRING, FieldMode.NULLABLE),
    MODULE_ID(FieldType.STRING, FieldMode.NULLABLE),
    NICKNAME(FieldType.STRING, FieldMode.NULLABLE),
    OFFSET(FieldType.STRING, FieldMode.NULLABLE),
    PENDING_TIME_USEC(FieldType.INTEGER, FieldMode.NULLABLE),
    REFERRER(FieldType.STRING, FieldMode.NULLABLE),
    REPLICA_INDEX(FieldType.INTEGER, FieldMode.NULLABLE),
    REQUEST_ID(FieldType.STRING, FieldMode.NULLABLE),
    RESOURCE(FieldType.STRING, FieldMode.NULLABLE),
    RESPONSE_SIZE(FieldType.INTEGER, FieldMode.NULLABLE),
    STATUS(FieldType.INTEGER, FieldMode.NULLABLE),
    TASK_NAME(FieldType.STRING, FieldMode.NULLABLE),
    TASK_QUEUE_NAME(FieldType.STRING, FieldMode.NULLABLE),
    URL_MAP_ENTRY(FieldType.STRING, FieldMode.NULLABLE),
    USER_AGENT(FieldType.STRING, FieldMode.NULLABLE),
    VERSION_ID(FieldType.STRING, FieldMode.NULLABLE),
    // Repeated RECORD field whose children are the nested app-log fields declared above.
    APP_LOG_LINES(FieldType.RECORD, FieldMode.REPEATED,
        ImmutableList.of(LOG_LEVEL, LOG_MESSAGE, TIME));

    private final FieldType fieldType;
    private final FieldMode fieldMode;
    // Empty for leaf fields; non-empty only for RECORD-typed fields like APP_LOG_LINES.
    private final ImmutableList<LogsTableField> childFields;

    LogsTableField(FieldType fieldType, FieldMode fieldMode) {
      // Leaf field: no children.
      this(fieldType, fieldMode, ImmutableList.<LogsTableField>of());
    }

    LogsTableField(
        FieldType fieldType, FieldMode fieldMode, ImmutableList<LogsTableField> childFields) {
      this.fieldType = checkNotNull(fieldType);
      this.fieldMode = checkNotNull(fieldMode);
      this.childFields = checkNotNull(childFields);
    }

    /** Return the name of the field as it should appear in the Bigquery schema. */
    String schemaName() {
      // E.g. LOG_LEVEL -> "log_level".
      return UPPER_UNDERSCORE.to(LOWER_UNDERSCORE, name());
    }

    /** Return the {@link TableFieldSchema} of this field for use in a Bigquery table. */
    private TableFieldSchema getTableFieldSchema() {
      TableFieldSchema tableFieldSchema = new TableFieldSchema()
          .setName(schemaName())
          .setType(fieldType.schemaName())
          .setMode(fieldMode.schemaName());
      if (!childFields.isEmpty()) {
        // RECORD fields carry their children as a nested schema.
        tableFieldSchema.setFields(getSchema(childFields));
      }
      return tableFieldSchema;
    }

    /**
     * Return the schema of a list of {@link TableFieldSchema} objects for use in a Bigquery table.
     */
    private static List<TableFieldSchema> getSchema(Iterable<LogsTableField> fields) {
      return FluentIterable.from(fields)
          .transform(new Function<LogsTableField, TableFieldSchema>() {
            @Override
            public TableFieldSchema apply(LogsTableField field) {
              return field.getTableFieldSchema();
            }})
          .toList();
    }

    /**
     * Return the schema of this table for use in a Bigquery table.
     */
    static List<TableFieldSchema> getTableSchema() {
      List<LogsTableField> allFields = Arrays.asList(LogsTableField.values());

      // Collect the list of all child fields so we can exclude them from the list of fields at the
      // top level of the schema.
      Set<LogsTableField> childFields = FluentIterable.from(allFields)
          .transformAndConcat(new Function<LogsTableField, List<LogsTableField>>() {
            @Override
            public List<LogsTableField> apply(LogsTableField field) {
              return field.childFields;
            }})
          .toSet();

      Set<LogsTableField> topLevelFields =
          Sets.difference(ImmutableSet.copyOf(allFields), childFields);
      return getSchema(topLevelFields);
    }
  }
}
|
|
@ -21,12 +21,8 @@ import static javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST;
|
|||
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
|
||||
import static javax.servlet.http.HttpServletResponse.SC_OK;
|
||||
|
||||
import com.google.api.client.extensions.appengine.http.UrlFetchTransport;
|
||||
import com.google.api.client.googleapis.extensions.appengine.auth.oauth2.AppIdentityCredential;
|
||||
import com.google.api.client.googleapis.json.GoogleJsonResponseException;
|
||||
import com.google.api.client.json.jackson2.JacksonFactory;
|
||||
import com.google.api.services.bigquery.Bigquery;
|
||||
import com.google.api.services.bigquery.BigqueryScopes;
|
||||
import com.google.api.services.bigquery.model.Table;
|
||||
import com.google.api.services.bigquery.model.TableReference;
|
||||
import com.google.api.services.bigquery.model.ViewDefinition;
|
||||
|
@ -34,7 +30,6 @@ import com.google.appengine.api.taskqueue.TaskOptions;
|
|||
import com.google.appengine.api.taskqueue.TaskOptions.Method;
|
||||
import com.google.common.net.MediaType;
|
||||
import com.google.domain.registry.bigquery.BigqueryFactory;
|
||||
import com.google.domain.registry.bigquery.BigqueryHelper;
|
||||
import com.google.domain.registry.config.RegistryEnvironment;
|
||||
import com.google.domain.registry.util.FormattingLogger;
|
||||
import com.google.domain.registry.util.NonFinalForTesting;
|
||||
|
@ -62,8 +57,6 @@ public class UpdateSnapshotViewServlet extends HttpServlet {
|
|||
|
||||
private static final FormattingLogger logger = FormattingLogger.getLoggerForCallerClass();
|
||||
|
||||
private static final BigqueryHelper bigqueryHelper = new BigqueryHelper();
|
||||
|
||||
@NonFinalForTesting
|
||||
private static BigqueryFactory bigqueryFactory = new BigqueryFactory();
|
||||
|
||||
|
@ -99,15 +92,9 @@ public class UpdateSnapshotViewServlet extends HttpServlet {
|
|||
|
||||
private String updateSnapshotView(String datasetId, String tableId, String kindName)
|
||||
throws IOException {
|
||||
Bigquery bigquery = bigqueryFactory.create(
|
||||
getClass().getSimpleName(),
|
||||
new UrlFetchTransport(),
|
||||
new JacksonFactory(),
|
||||
new AppIdentityCredential(BigqueryScopes.all()));
|
||||
String projectId = ENVIRONMENT.config().getProjectId();
|
||||
|
||||
bigqueryHelper.ensureDataset(
|
||||
bigquery, projectId, ENVIRONMENT.config().getLatestSnapshotDataset());
|
||||
Bigquery bigquery =
|
||||
bigqueryFactory.create(projectId, ENVIRONMENT.config().getLatestSnapshotDataset());
|
||||
|
||||
updateTable(bigquery, new Table()
|
||||
.setTableReference(new TableReference()
|
||||
|
|
|
@ -14,30 +14,14 @@
|
|||
|
||||
package com.google.domain.registry.monitoring.whitebox;
|
||||
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.domain.registry.bigquery.BigqueryUtils.FieldType;
|
||||
import com.google.domain.registry.bigquery.BigquerySchemas;
|
||||
import com.google.domain.registry.model.eppoutput.Result.Code;
|
||||
|
||||
/** The EPP Metrics collector. See {@link Metrics}. */
|
||||
public class EppMetrics extends Metrics {
|
||||
|
||||
public static final ImmutableList<TableFieldSchema> SCHEMA_FIELDS =
|
||||
ImmutableList.<TableFieldSchema>of(
|
||||
new TableFieldSchema().setName("requestId").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("startTime").setType(FieldType.TIMESTAMP.name()),
|
||||
new TableFieldSchema().setName("endTime").setType(FieldType.TIMESTAMP.name()),
|
||||
new TableFieldSchema().setName("commandName").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("clientId").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("privilegeLevel").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("eppTarget").setType(FieldType.STRING.name()),
|
||||
new TableFieldSchema().setName("eppStatus").setType(FieldType.INTEGER.name()),
|
||||
new TableFieldSchema().setName("attempts").setType(FieldType.INTEGER.name()));
|
||||
|
||||
public static final String TABLE_ID = "eppMetrics";
|
||||
|
||||
public EppMetrics() {
|
||||
setTableId(TABLE_ID);
|
||||
setTableId(BigquerySchemas.EPPMETRICS_TABLE_ID);
|
||||
fields.put("attempts", 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -14,19 +14,12 @@
|
|||
|
||||
package com.google.domain.registry.monitoring.whitebox;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static com.google.domain.registry.util.HttpServletUtils.getRequiredParameterValue;
|
||||
|
||||
import com.google.api.client.extensions.appengine.http.UrlFetchTransport;
|
||||
import com.google.api.client.googleapis.extensions.appengine.auth.oauth2.AppIdentityCredential;
|
||||
import com.google.api.client.json.jackson2.JacksonFactory;
|
||||
import com.google.api.services.bigquery.Bigquery;
|
||||
import com.google.api.services.bigquery.BigqueryScopes;
|
||||
import com.google.api.services.bigquery.model.TableDataInsertAllRequest;
|
||||
import com.google.api.services.bigquery.model.TableDataInsertAllResponse;
|
||||
import com.google.api.services.bigquery.model.TableDataInsertAllResponse.InsertErrors;
|
||||
import com.google.api.services.bigquery.model.TableFieldSchema;
|
||||
import com.google.api.services.bigquery.model.TableReference;
|
||||
import com.google.common.base.Function;
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.collect.FluentIterable;
|
||||
|
@ -35,13 +28,11 @@ import com.google.common.collect.ImmutableMap;
|
|||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.google.domain.registry.bigquery.BigqueryFactory;
|
||||
import com.google.domain.registry.bigquery.BigqueryHelper;
|
||||
import com.google.domain.registry.config.RegistryEnvironment;
|
||||
import com.google.domain.registry.util.FormattingLogger;
|
||||
import com.google.domain.registry.util.NonFinalForTesting;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.servlet.http.HttpServlet;
|
||||
|
@ -59,25 +50,11 @@ public class MetricsTaskServlet extends HttpServlet {
|
|||
|
||||
private static final Set<String> SPECIAL_PARAMS = ImmutableSet.of("tableId", "insertId");
|
||||
|
||||
// Add any concrete Metric classes to this map or doPost() will throw IllegalArgumentException.
|
||||
private static final Map<String, ImmutableList<TableFieldSchema>> KNOWN_TABLE_SCHEMAS =
|
||||
ImmutableMap.of(EppMetrics.TABLE_ID, EppMetrics.SCHEMA_FIELDS);
|
||||
|
||||
// servlet level cross-request caches to avoid unnecessary RPCs.
|
||||
@NonFinalForTesting
|
||||
private static Set<String> knownTables = Sets.newConcurrentHashSet();
|
||||
|
||||
@NonFinalForTesting
|
||||
private static Set<String> datasets = Sets.newConcurrentHashSet();
|
||||
|
||||
@NonFinalForTesting
|
||||
private static BigqueryFactory bigqueryFactory = new BigqueryFactory();
|
||||
|
||||
@NonFinalForTesting
|
||||
private static BigqueryHelper bigqueryHelper = new BigqueryHelper();
|
||||
|
||||
/** Returns a filtered {@link ImmutableMap} from an {@link HttpServletRequest} */
|
||||
private static ImmutableMap<String, Object> getFiteredMapFromRequest(
|
||||
private static ImmutableMap<String, Object> getFilteredMapFromRequest(
|
||||
HttpServletRequest req,
|
||||
Set<String> filter) {
|
||||
ImmutableMap.Builder<String, Object> b = new ImmutableMap.Builder<>();
|
||||
|
@ -97,35 +74,8 @@ public class MetricsTaskServlet extends HttpServlet {
|
|||
public void doPost(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
|
||||
try {
|
||||
final String tableId = getRequiredParameterValue(req, "tableId");
|
||||
ImmutableMap<String, Object> fields = getFiteredMapFromRequest(req, SPECIAL_PARAMS);
|
||||
|
||||
|
||||
final Bigquery bigquery = bigqueryFactory.create(
|
||||
getClass().getSimpleName(),
|
||||
new UrlFetchTransport(),
|
||||
new JacksonFactory(),
|
||||
new AppIdentityCredential(BigqueryScopes.all()));
|
||||
|
||||
|
||||
// Note: it's safe for multiple threads to call this as the dataset will
|
||||
// only be created once.
|
||||
if (!datasets.contains(DATASET_ID)) {
|
||||
bigqueryHelper.ensureDataset(bigquery, PROJECT_ID, DATASET_ID);
|
||||
datasets.add(DATASET_ID);
|
||||
}
|
||||
|
||||
checkArgument(KNOWN_TABLE_SCHEMAS.containsKey(tableId), "Unknown table ID: %s", tableId);
|
||||
|
||||
if (!knownTables.contains(tableId)) {
|
||||
bigqueryHelper.ensureTable(
|
||||
bigquery,
|
||||
new TableReference()
|
||||
.setDatasetId(DATASET_ID)
|
||||
.setProjectId(PROJECT_ID)
|
||||
.setTableId(tableId),
|
||||
KNOWN_TABLE_SCHEMAS.get(tableId));
|
||||
knownTables.add(tableId);
|
||||
}
|
||||
ImmutableMap<String, Object> fields = getFilteredMapFromRequest(req, SPECIAL_PARAMS);
|
||||
Bigquery bigquery = bigqueryFactory.create(PROJECT_ID, DATASET_ID, tableId);
|
||||
|
||||
TableDataInsertAllResponse response = bigquery.tabledata()
|
||||
.insertAll(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue