Mirror of https://github.com/google/nomulus.git, synced 2025-04-29 19:47:51 +02:00
Improve some log messages for readability/consistency (#1333)
* Improve some log messages for readability/consistency
* Address code review comments
This commit is contained in:
parent 3efb2bc509
commit 703c8edd8c
100 changed files with 252 additions and 250 deletions
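Most of the hunks below apply the same few Flogger conventions: log messages read as complete sentences ending in a period, integral format arguments use %d rather than %s, ambiguous names are quoted, and exceptions are attached with withCause() instead of being passed as an extra log argument (a few hunks also tighten method visibility). A minimal sketch of the target style, where the class, method, and argument names are illustrative only and not taken from the commit:

import com.google.common.flogger.FluentLogger;

/** Illustrative sketch only; demonstrates the log-message style this commit converges on. */
public final class LogStyleExample {
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  void demo(int manifestCount, String domainName, Exception e) {
    // Complete sentence with a trailing period; %d for integral values.
    logger.atInfo().log("Found %d manifests to export.", manifestCount);
    // Names that could read ambiguously in prose are quoted.
    logger.atInfo().log("Enqueued DNS refresh for domain '%s'.", domainName);
    // The exception travels via withCause() rather than as a trailing log argument.
    logger.atWarning().withCause(e).log("Failed to fetch the estimated query size.");
  }
}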
|
@ -39,7 +39,7 @@ public final class SystemInfo {
|
|||
pid.getOutputStream().close();
|
||||
pid.waitFor();
|
||||
} catch (IOException e) {
|
||||
logger.atWarning().withCause(e).log("%s command not available", cmd);
|
||||
logger.atWarning().withCause(e).log("%s command not available.", cmd);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
|
|
@ -93,7 +93,7 @@ public final class DeleteOldCommitLogsAction implements Runnable {
|
|||
public void run() {
|
||||
DateTime deletionThreshold = clock.nowUtc().minus(maxAge);
|
||||
logger.atInfo().log(
|
||||
"Processing asynchronous deletion of unreferenced CommitLogManifests older than %s",
|
||||
"Processing asynchronous deletion of unreferenced CommitLogManifests older than %s.",
|
||||
deletionThreshold);
|
||||
|
||||
mrRunner
|
||||
|
@ -208,7 +208,7 @@ public final class DeleteOldCommitLogsAction implements Runnable {
|
|||
getContext().incrementCounter("EPP resources missing pre-threshold revision (SEE LOGS)");
|
||||
logger.atSevere().log(
|
||||
"EPP resource missing old enough revision: "
|
||||
+ "%s (created on %s) has %d revisions between %s and %s, while threshold is %s",
|
||||
+ "%s (created on %s) has %d revisions between %s and %s, while threshold is %s.",
|
||||
Key.create(eppResource),
|
||||
eppResource.getCreationTime(),
|
||||
eppResource.getRevisions().size(),
|
||||
|
|
|
@ -100,7 +100,7 @@ public final class ExportCommitLogDiffAction implements Runnable {
|
|||
|
||||
// Load the keys of all the manifests to include in this diff.
|
||||
List<Key<CommitLogManifest>> sortedKeys = loadAllDiffKeys(lowerCheckpoint, upperCheckpoint);
|
||||
logger.atInfo().log("Found %d manifests to export", sortedKeys.size());
|
||||
logger.atInfo().log("Found %d manifests to export.", sortedKeys.size());
|
||||
// Open an output channel to GCS, wrapped in a stream for convenience.
|
||||
try (OutputStream gcsStream =
|
||||
gcsUtils.openOutputStream(
|
||||
|
@ -124,7 +124,7 @@ public final class ExportCommitLogDiffAction implements Runnable {
|
|||
for (int i = 0; i < keyChunks.size(); i++) {
|
||||
// Force the async load to finish.
|
||||
Collection<CommitLogManifest> chunkValues = nextChunkToExport.values();
|
||||
logger.atInfo().log("Loaded %d manifests", chunkValues.size());
|
||||
logger.atInfo().log("Loaded %d manifests.", chunkValues.size());
|
||||
// Since there is no hard bound on how much data this might be, take care not to let the
|
||||
// Objectify session cache fill up and potentially run out of memory. This is the only safe
|
||||
// point to do this since at this point there is no async load in progress.
|
||||
|
@ -134,12 +134,12 @@ public final class ExportCommitLogDiffAction implements Runnable {
|
|||
nextChunkToExport = auditedOfy().load().keys(keyChunks.get(i + 1));
|
||||
}
|
||||
exportChunk(gcsStream, chunkValues);
|
||||
logger.atInfo().log("Exported %d manifests", chunkValues.size());
|
||||
logger.atInfo().log("Exported %d manifests.", chunkValues.size());
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
logger.atInfo().log("Exported %d manifests in total", sortedKeys.size());
|
||||
logger.atInfo().log("Exported %d total manifests.", sortedKeys.size());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -94,14 +94,14 @@ class GcsDiffFileLister {
|
|||
logger.atInfo().log(
|
||||
"Gap discovered in sequence terminating at %s, missing file: %s",
|
||||
sequence.lastKey(), filename);
|
||||
logger.atInfo().log("Found sequence from %s to %s", checkpointTime, lastTime);
|
||||
logger.atInfo().log("Found sequence from %s to %s.", checkpointTime, lastTime);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
sequence.put(checkpointTime, blobInfo);
|
||||
checkpointTime = getLowerBoundTime(blobInfo);
|
||||
}
|
||||
logger.atInfo().log("Found sequence from %s to %s", checkpointTime, lastTime);
|
||||
logger.atInfo().log("Found sequence from %s to %s.", checkpointTime, lastTime);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -140,7 +140,7 @@ class GcsDiffFileLister {
|
|||
}
|
||||
}
|
||||
if (upperBoundTimesToBlobInfo.isEmpty()) {
|
||||
logger.atInfo().log("No files found");
|
||||
logger.atInfo().log("No files found.");
|
||||
return ImmutableList.of();
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ class GcsDiffFileLister {
|
|||
|
||||
logger.atInfo().log(
|
||||
"Actual restore from time: %s", getLowerBoundTime(sequence.firstEntry().getValue()));
|
||||
logger.atInfo().log("Found %d files to restore", sequence.size());
|
||||
logger.atInfo().log("Found %d files to restore.", sequence.size());
|
||||
return ImmutableList.copyOf(sequence.values());
|
||||
}
|
||||
|
||||
|
|
|
@ -273,7 +273,7 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
|
|||
ofyPojo.getClass());
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.atSevere().log("Error when replaying object %s", ofyPojo);
|
||||
logger.atSevere().log("Error when replaying object %s.", ofyPojo);
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
@ -300,7 +300,7 @@ public class ReplayCommitLogsToSqlAction implements Runnable {
|
|||
jpaTm().deleteIgnoringReadOnly(entityVKey);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.atSevere().log("Error when deleting key %s", entityVKey);
|
||||
logger.atSevere().log("Error when deleting key %s.", entityVKey);
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -103,13 +103,13 @@ public class RestoreCommitLogsAction implements Runnable {
|
|||
!FORBIDDEN_ENVIRONMENTS.contains(RegistryEnvironment.get()),
|
||||
"DO NOT RUN IN PRODUCTION OR SANDBOX.");
|
||||
if (dryRun) {
|
||||
logger.atInfo().log("Running in dryRun mode");
|
||||
logger.atInfo().log("Running in dry-run mode.");
|
||||
}
|
||||
String gcsBucket = gcsBucketOverride.orElse(defaultGcsBucket);
|
||||
logger.atInfo().log("Restoring from %s.", gcsBucket);
|
||||
List<BlobInfo> diffFiles = diffLister.listDiffFiles(gcsBucket, fromTime, toTime);
|
||||
if (diffFiles.isEmpty()) {
|
||||
logger.atInfo().log("Nothing to restore");
|
||||
logger.atInfo().log("Nothing to restore.");
|
||||
return;
|
||||
}
|
||||
Map<Integer, DateTime> bucketTimestamps = new HashMap<>();
|
||||
|
@ -143,7 +143,7 @@ public class RestoreCommitLogsAction implements Runnable {
|
|||
.build()),
|
||||
Stream.of(CommitLogCheckpointRoot.create(lastCheckpoint.getCheckpointTime())))
|
||||
.collect(toImmutableList()));
|
||||
logger.atInfo().log("Restore complete");
|
||||
logger.atInfo().log("Restore complete.");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -311,7 +311,7 @@ public class DeleteContactsAndHostsAction implements Runnable {
|
|||
@Override
|
||||
public void reduce(final DeletionRequest deletionRequest, ReducerInput<Boolean> values) {
|
||||
final boolean hasNoActiveReferences = !Iterators.contains(values, true);
|
||||
logger.atInfo().log("Processing async deletion request for %s", deletionRequest.key());
|
||||
logger.atInfo().log("Processing async deletion request for %s.", deletionRequest.key());
|
||||
DeletionResult result =
|
||||
tm()
|
||||
.transactNew(
|
||||
|
@ -605,12 +605,12 @@ public class DeleteContactsAndHostsAction implements Runnable {
|
|||
static boolean doesResourceStateAllowDeletion(EppResource resource, DateTime now) {
|
||||
Key<EppResource> key = Key.create(resource);
|
||||
if (isDeleted(resource, now)) {
|
||||
logger.atWarning().log("Cannot asynchronously delete %s because it is already deleted", key);
|
||||
logger.atWarning().log("Cannot asynchronously delete %s because it is already deleted.", key);
|
||||
return false;
|
||||
}
|
||||
if (!resource.getStatusValues().contains(PENDING_DELETE)) {
|
||||
logger.atWarning().log(
|
||||
"Cannot asynchronously delete %s because it is not in PENDING_DELETE", key);
|
||||
"Cannot asynchronously delete %s because it is not in PENDING_DELETE.", key);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
|
|
@ -167,7 +167,7 @@ public class DeleteExpiredDomainsAction implements Runnable {
|
|||
|
||||
/** Runs the actual domain delete flow and returns whether the deletion was successful. */
|
||||
private boolean runDomainDeleteFlow(DomainBase domain) {
|
||||
logger.atInfo().log("Attempting to delete domain %s", domain.getDomainName());
|
||||
logger.atInfo().log("Attempting to delete domain '%s'.", domain.getDomainName());
|
||||
// Create a new transaction that the flow's execution will be enlisted in that loads the domain
|
||||
// transactionally. This way we can ensure that nothing else has modified the domain in question
|
||||
// in the intervening period since the query above found it.
|
||||
|
@ -203,7 +203,7 @@ public class DeleteExpiredDomainsAction implements Runnable {
|
|||
|
||||
if (eppOutput.isPresent()) {
|
||||
if (eppOutput.get().isSuccess()) {
|
||||
logger.atInfo().log("Successfully deleted domain %s", domain.getDomainName());
|
||||
logger.atInfo().log("Successfully deleted domain '%s'.", domain.getDomainName());
|
||||
} else {
|
||||
logger.atSevere().log(
|
||||
"Failed to delete domain %s; EPP response:\n\n%s",
|
||||
|
|
|
@ -142,7 +142,7 @@ public class DeleteLoadTestDataAction implements Runnable {
|
|||
// that are linked to domains (since it would break the foreign keys)
|
||||
if (EppResourceUtils.isLinked(contact.createVKey(), clock.nowUtc())) {
|
||||
logger.atWarning().log(
|
||||
"Cannot delete contact with repo ID %s since it is referenced from a domain",
|
||||
"Cannot delete contact with repo ID %s since it is referenced from a domain.",
|
||||
contact.getRepoId());
|
||||
return;
|
||||
}
|
||||
|
@ -177,7 +177,7 @@ public class DeleteLoadTestDataAction implements Runnable {
|
|||
HistoryEntryDao.loadHistoryObjectsForResource(eppResource.createVKey());
|
||||
if (isDryRun) {
|
||||
logger.atInfo().log(
|
||||
"Would delete repo ID %s along with %d history objects",
|
||||
"Would delete repo ID %s along with %d history objects.",
|
||||
eppResource.getRepoId(), historyObjects.size());
|
||||
} else {
|
||||
historyObjects.forEach(tm()::delete);
|
||||
|
|
|
@ -228,7 +228,7 @@ public class DeleteProberDataAction implements Runnable {
|
|||
if (EppResourceUtils.isActive(domain, tm().getTransactionTime())) {
|
||||
if (isDryRun) {
|
||||
logger.atInfo().log(
|
||||
"Would soft-delete the active domain: %s (%s)",
|
||||
"Would soft-delete the active domain: %s (%s).",
|
||||
domain.getDomainName(), domain.getRepoId());
|
||||
} else {
|
||||
softDeleteDomain(domain, registryAdminRegistrarId, dnsQueue);
|
||||
|
@ -237,7 +237,7 @@ public class DeleteProberDataAction implements Runnable {
|
|||
} else {
|
||||
if (isDryRun) {
|
||||
logger.atInfo().log(
|
||||
"Would hard-delete the non-active domain: %s (%s) and its dependents",
|
||||
"Would hard-delete the non-active domain: %s (%s) and its dependents.",
|
||||
domain.getDomainName(), domain.getRepoId());
|
||||
} else {
|
||||
domainRepoIdsToHardDelete.add(domain.getRepoId());
|
||||
|
@ -331,7 +331,7 @@ public class DeleteProberDataAction implements Runnable {
|
|||
getContext().incrementCounter("skipped, non-prober data");
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.atSevere().withCause(t).log("Error while deleting prober data for key %s", key);
|
||||
logger.atSevere().withCause(t).log("Error while deleting prober data for key %s.", key);
|
||||
getContext().incrementCounter(String.format("error, kind %s", key.getKind()));
|
||||
}
|
||||
}
|
||||
|
@ -372,7 +372,7 @@ public class DeleteProberDataAction implements Runnable {
|
|||
if (EppResourceUtils.isActive(domain, now)) {
|
||||
if (isDryRun) {
|
||||
logger.atInfo().log(
|
||||
"Would soft-delete the active domain: %s (%s)", domainName, domainKey);
|
||||
"Would soft-delete the active domain: %s (%s).", domainName, domainKey);
|
||||
} else {
|
||||
tm().transact(() -> softDeleteDomain(domain, registryAdminRegistrarId, dnsQueue));
|
||||
}
|
||||
|
|
|
@ -148,9 +148,9 @@ public class ExpandRecurringBillingEventsAction implements Runnable {
|
|||
.reduce(0, Integer::sum);
|
||||
|
||||
if (!isDryRun) {
|
||||
logger.atInfo().log("Saved OneTime billing events", numBillingEventsSaved);
|
||||
logger.atInfo().log("Saved OneTime billing events.", numBillingEventsSaved);
|
||||
} else {
|
||||
logger.atInfo().log("Generated OneTime billing events (dry run)", numBillingEventsSaved);
|
||||
logger.atInfo().log("Generated OneTime billing events (dry run).", numBillingEventsSaved);
|
||||
}
|
||||
logger.atInfo().log(
|
||||
"Recurring event expansion %s complete for billing event range [%s, %s).",
|
||||
|
|
|
@ -173,7 +173,7 @@ public class RefreshDnsOnHostRenameAction implements Runnable {
|
|||
retrier.callWithRetry(
|
||||
() -> dnsQueue.addDomainRefreshTask(domainName),
|
||||
TransientFailureException.class);
|
||||
logger.atInfo().log("Enqueued DNS refresh for domain %s.", domainName);
|
||||
logger.atInfo().log("Enqueued DNS refresh for domain '%s'.", domainName);
|
||||
});
|
||||
deleteTasksWithRetry(
|
||||
refreshRequests,
|
||||
|
|
|
@ -313,7 +313,7 @@ public class RelockDomainAction implements Runnable {
|
|||
builder.add(new InternetAddress(registryLockEmailAddress));
|
||||
} catch (AddressException e) {
|
||||
// This shouldn't stop any other emails going out, so swallow it
|
||||
logger.atWarning().log("Invalid email address %s", registryLockEmailAddress);
|
||||
logger.atWarning().log("Invalid email address '%s'.", registryLockEmailAddress);
|
||||
}
|
||||
}
|
||||
return builder.build();
|
||||
|
|
|
@ -43,7 +43,7 @@ public class RegistryPipelineWorkerInitializer implements JvmInitializer {
|
|||
if (environment == null || environment.equals(RegistryEnvironment.UNITTEST)) {
|
||||
return;
|
||||
}
|
||||
logger.atInfo().log("Setting up RegistryEnvironment: %s", environment);
|
||||
logger.atInfo().log("Setting up RegistryEnvironment %s.", environment);
|
||||
environment.setup();
|
||||
Lazy<JpaTransactionManager> transactionManagerLazy =
|
||||
toRegistryPipelineComponent(registryOptions).getJpaTransactionManager();
|
||||
|
|
|
@ -169,14 +169,15 @@ public class DatastoreV1 {
|
|||
int numSplits;
|
||||
try {
|
||||
long estimatedSizeBytes = getEstimatedSizeBytes(datastore, query, namespace);
|
||||
logger.atInfo().log("Estimated size bytes for the query is: %s", estimatedSizeBytes);
|
||||
logger.atInfo().log("Estimated size for the query is %d bytes.", estimatedSizeBytes);
|
||||
numSplits =
|
||||
(int)
|
||||
Math.min(
|
||||
NUM_QUERY_SPLITS_MAX,
|
||||
Math.round(((double) estimatedSizeBytes) / DEFAULT_BUNDLE_SIZE_BYTES));
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().log("Failed the fetch estimatedSizeBytes for query: %s", query, e);
|
||||
logger.atWarning().withCause(e).log(
|
||||
"Failed the fetch estimatedSizeBytes for query: %s", query);
|
||||
// Fallback in case estimated size is unavailable.
|
||||
numSplits = NUM_QUERY_SPLITS_MIN;
|
||||
}
|
||||
|
@ -215,7 +216,7 @@ public class DatastoreV1 {
|
|||
private static Entity getLatestTableStats(
|
||||
String ourKind, @Nullable String namespace, Datastore datastore) throws DatastoreException {
|
||||
long latestTimestamp = queryLatestStatisticsTimestamp(datastore, namespace);
|
||||
logger.atInfo().log("Latest stats timestamp for kind %s is %s", ourKind, latestTimestamp);
|
||||
logger.atInfo().log("Latest stats timestamp for kind %s is %s.", ourKind, latestTimestamp);
|
||||
|
||||
Query.Builder queryBuilder = Query.newBuilder();
|
||||
if (Strings.isNullOrEmpty(namespace)) {
|
||||
|
@ -234,7 +235,7 @@ public class DatastoreV1 {
|
|||
long now = System.currentTimeMillis();
|
||||
RunQueryResponse response = datastore.runQuery(request);
|
||||
logger.atFine().log(
|
||||
"Query for per-kind statistics took %sms", System.currentTimeMillis() - now);
|
||||
"Query for per-kind statistics took %d ms.", System.currentTimeMillis() - now);
|
||||
|
||||
QueryResultBatch batch = response.getBatch();
|
||||
if (batch.getEntityResultsCount() == 0) {
|
||||
|
@ -330,7 +331,7 @@ public class DatastoreV1 {
|
|||
logger.atWarning().log(
|
||||
"Failed to translate Gql query '%s': %s", gqlQueryWithZeroLimit, e.getMessage());
|
||||
logger.atWarning().log(
|
||||
"User query might have a limit already set, so trying without zero limit");
|
||||
"User query might have a limit already set, so trying without zero limit.");
|
||||
// Retry without the zero limit.
|
||||
return translateGqlQuery(gql, datastore, namespace);
|
||||
} else {
|
||||
|
@ -514,10 +515,10 @@ public class DatastoreV1 {
|
|||
@ProcessElement
|
||||
public void processElement(ProcessContext c) throws Exception {
|
||||
String gqlQuery = c.element();
|
||||
logger.atInfo().log("User query: '%s'", gqlQuery);
|
||||
logger.atInfo().log("User query: '%s'.", gqlQuery);
|
||||
Query query =
|
||||
translateGqlQueryWithLimitCheck(gqlQuery, datastore, v1Options.getNamespace());
|
||||
logger.atInfo().log("User gql query translated to Query(%s)", query);
|
||||
logger.atInfo().log("User gql query translated to Query(%s).", query);
|
||||
c.output(query);
|
||||
}
|
||||
}
|
||||
|
@ -573,7 +574,7 @@ public class DatastoreV1 {
|
|||
estimatedNumSplits = numSplits;
|
||||
}
|
||||
|
||||
logger.atInfo().log("Splitting the query into %s splits", estimatedNumSplits);
|
||||
logger.atInfo().log("Splitting the query into %d splits.", estimatedNumSplits);
|
||||
List<Query> querySplits;
|
||||
try {
|
||||
querySplits =
|
||||
|
@ -647,7 +648,7 @@ public class DatastoreV1 {
|
|||
throw exception;
|
||||
}
|
||||
if (!BackOffUtils.next(sleeper, backoff)) {
|
||||
logger.atSevere().log("Aborting after %s retries.", MAX_RETRIES);
|
||||
logger.atSevere().log("Aborting after %d retries.", MAX_RETRIES);
|
||||
throw exception;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -172,7 +172,7 @@ public class RdeIO {
|
|||
|
||||
// Write a gigantic XML file to GCS. We'll start by opening encrypted out/err file handles.
|
||||
|
||||
logger.atInfo().log("Writing %s and %s", xmlFilename, xmlLengthFilename);
|
||||
logger.atInfo().log("Writing files '%s' and '%s'.", xmlFilename, xmlLengthFilename);
|
||||
try (OutputStream gcsOutput = gcsUtils.openOutputStream(xmlFilename);
|
||||
OutputStream lengthOutput = gcsUtils.openOutputStream(xmlLengthFilename);
|
||||
OutputStream ghostrydeEncoder = Ghostryde.encoder(gcsOutput, stagingKey, lengthOutput);
|
||||
|
@ -219,7 +219,7 @@ public class RdeIO {
|
|||
//
|
||||
// This will be sent to ICANN once we're done uploading the big XML to the escrow provider.
|
||||
if (mode == RdeMode.FULL) {
|
||||
logger.atInfo().log("Writing %s", reportFilename);
|
||||
logger.atInfo().log("Writing file '%s'.", reportFilename);
|
||||
try (OutputStream gcsOutput = gcsUtils.openOutputStream(reportFilename);
|
||||
OutputStream ghostrydeEncoder = Ghostryde.encoder(gcsOutput, stagingKey)) {
|
||||
counter.makeReport(id, watermark, header, revision).marshal(ghostrydeEncoder, UTF_8);
|
||||
|
@ -229,7 +229,7 @@ public class RdeIO {
|
|||
}
|
||||
// Now that we're done, output roll the cursor forward.
|
||||
if (key.manual()) {
|
||||
logger.atInfo().log("Manual operation; not advancing cursor or enqueuing upload task");
|
||||
logger.atInfo().log("Manual operation; not advancing cursor or enqueuing upload task.");
|
||||
} else {
|
||||
outputReceiver.output(KV.of(key, revision));
|
||||
}
|
||||
|
@ -265,7 +265,7 @@ public class RdeIO {
|
|||
key);
|
||||
tm().put(Cursor.create(key.cursor(), newPosition, registry));
|
||||
logger.atInfo().log(
|
||||
"Rolled forward %s on %s cursor to %s", key.cursor(), key.tld(), newPosition);
|
||||
"Rolled forward %s on %s cursor to %s.", key.cursor(), key.tld(), newPosition);
|
||||
RdeRevision.saveRevision(key.tld(), key.watermark(), key.mode(), revision);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -218,7 +218,7 @@ public class SafeBrowsingTransforms {
|
|||
throws JSONException, IOException {
|
||||
int statusCode = response.getStatusLine().getStatusCode();
|
||||
if (statusCode != SC_OK) {
|
||||
logger.atWarning().log("Got unexpected status code %s from response", statusCode);
|
||||
logger.atWarning().log("Got unexpected status code %s from response.", statusCode);
|
||||
} else {
|
||||
// Unpack the response body
|
||||
JSONObject responseBody =
|
||||
|
@ -227,7 +227,7 @@ public class SafeBrowsingTransforms {
|
|||
new InputStreamReader(response.getEntity().getContent(), UTF_8)));
|
||||
logger.atInfo().log("Got response: %s", responseBody.toString());
|
||||
if (responseBody.length() == 0) {
|
||||
logger.atInfo().log("Response was empty, no threats detected");
|
||||
logger.atInfo().log("Response was empty, no threats detected.");
|
||||
} else {
|
||||
// Emit all DomainNameInfos with their API results.
|
||||
JSONArray threatMatches = responseBody.getJSONArray("matches");
|
||||
|
|
|
@ -632,7 +632,7 @@ public class BigqueryConnection implements AutoCloseable {
|
|||
private static String summarizeCompletedJob(Job job) {
|
||||
JobStatistics stats = job.getStatistics();
|
||||
return String.format(
|
||||
"Job took %,.3f seconds after a %,.3f second delay and processed %,d bytes (%s)",
|
||||
"Job took %,.3f seconds after a %,.3f second delay and processed %,d bytes (%s).",
|
||||
(stats.getEndTime() - stats.getStartTime()) / 1000.0,
|
||||
(stats.getStartTime() - stats.getCreationTime()) / 1000.0,
|
||||
stats.getTotalBytesProcessed(),
|
||||
|
@ -706,17 +706,17 @@ public class BigqueryConnection implements AutoCloseable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Helper that creates a dataset with this name if it doesn't already exist, and returns true
|
||||
* if creation took place.
|
||||
* Helper that creates a dataset with this name if it doesn't already exist, and returns true if
|
||||
* creation took place.
|
||||
*/
|
||||
public boolean createDatasetIfNeeded(String datasetName) throws IOException {
|
||||
private boolean createDatasetIfNeeded(String datasetName) throws IOException {
|
||||
if (!checkDatasetExists(datasetName)) {
|
||||
bigquery.datasets()
|
||||
.insert(getProjectId(), new Dataset().setDatasetReference(new DatasetReference()
|
||||
.setProjectId(getProjectId())
|
||||
.setDatasetId(datasetName)))
|
||||
.execute();
|
||||
logger.atInfo().log("Created dataset: %s:%s\n", getProjectId(), datasetName);
|
||||
logger.atInfo().log("Created dataset: %s: %s.", getProjectId(), datasetName);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -732,9 +732,8 @@ public class BigqueryConnection implements AutoCloseable {
|
|||
.setDefaultDataset(getDataset())
|
||||
.setDestinationTable(table))));
|
||||
} catch (BigqueryJobFailureException e) {
|
||||
if (e.getReason().equals("duplicate")) {
|
||||
// Table already exists.
|
||||
} else {
|
||||
if (!e.getReason().equals("duplicate")) {
|
||||
// Throw if it failed for any reason other than table already existing.
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -116,7 +116,7 @@ public class CheckedBigquery {
|
|||
.setTableReference(table))
|
||||
.execute();
|
||||
logger.atInfo().log(
|
||||
"Created BigQuery table %s:%s.%s",
|
||||
"Created BigQuery table %s:%s.%s.",
|
||||
table.getProjectId(), table.getDatasetId(), table.getTableId());
|
||||
} catch (IOException e) {
|
||||
// Swallow errors about a table that exists, and throw any other ones.
|
||||
|
|
|
@ -141,7 +141,7 @@ public final class TldFanoutAction implements Runnable {
|
|||
StringBuilder outputPayload =
|
||||
new StringBuilder(
|
||||
String.format("OK: Launched the following %d tasks in queue %s\n", tlds.size(), queue));
|
||||
logger.atInfo().log("Launching %d tasks in queue %s", tlds.size(), queue);
|
||||
logger.atInfo().log("Launching %d tasks in queue %s.", tlds.size(), queue);
|
||||
if (tlds.isEmpty()) {
|
||||
logger.atWarning().log("No TLDs to fan-out!");
|
||||
}
|
||||
|
@ -153,7 +153,7 @@ public final class TldFanoutAction implements Runnable {
|
|||
"- Task: '%s', tld: '%s', endpoint: '%s'\n",
|
||||
createdTask.getName(), tld, createdTask.getAppEngineHttpRequest().getRelativeUri()));
|
||||
logger.atInfo().log(
|
||||
"Task: '%s', tld: '%s', endpoint: '%s'",
|
||||
"Task: '%s', tld: '%s', endpoint: '%s'.",
|
||||
createdTask.getName(), tld, createdTask.getAppEngineHttpRequest().getRelativeUri());
|
||||
}
|
||||
response.setContentType(PLAIN_TEXT_UTF_8);
|
||||
|
|
|
@ -107,7 +107,7 @@ public class DnsQueue {
|
|||
private TaskHandle addToQueue(
|
||||
TargetType targetType, String targetName, String tld, Duration countdown) {
|
||||
logger.atInfo().log(
|
||||
"Adding task type=%s, target=%s, tld=%s to pull queue %s (%d tasks currently on queue)",
|
||||
"Adding task type=%s, target=%s, tld=%s to pull queue %s (%d tasks currently on queue).",
|
||||
targetType, targetName, tld, DNS_PULL_QUEUE_NAME, queue.fetchStatistics().getNumTasks());
|
||||
return queue.add(
|
||||
TaskOptions.Builder.withDefaults()
|
||||
|
@ -166,7 +166,7 @@ public class DnsQueue {
|
|||
"There are %d tasks in the DNS queue '%s'.", numTasks, DNS_PULL_QUEUE_NAME);
|
||||
return queue.leaseTasks(leaseDuration.getMillis(), MILLISECONDS, leaseTasksBatchSize);
|
||||
} catch (TransientFailureException | DeadlineExceededException e) {
|
||||
logger.atSevere().withCause(e).log("Failed leasing tasks too fast");
|
||||
logger.atSevere().withCause(e).log("Failed leasing tasks too fast.");
|
||||
return ImmutableList.of();
|
||||
}
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ public class DnsQueue {
|
|||
try {
|
||||
queue.deleteTask(tasks);
|
||||
} catch (TransientFailureException | DeadlineExceededException e) {
|
||||
logger.atSevere().withCause(e).log("Failed deleting tasks too fast");
|
||||
logger.atSevere().withCause(e).log("Failed deleting tasks too fast.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
new Duration(enqueuedTime, now));
|
||||
logger.atInfo().log(
|
||||
"publishDnsWriter latency statistics: TLD: %s, dnsWriter: %s, actionStatus: %s, "
|
||||
+ "numItems: %d, timeSinceCreation: %s, timeInQueue: %s",
|
||||
+ "numItems: %d, timeSinceCreation: %s, timeInQueue: %s.",
|
||||
tld,
|
||||
dnsWriter,
|
||||
status,
|
||||
|
@ -144,7 +144,7 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
|
||||
/** Adds all the domains and hosts in the batch back to the queue to be processed later. */
|
||||
private void requeueBatch() {
|
||||
logger.atInfo().log("Requeueing batch for retry");
|
||||
logger.atInfo().log("Requeueing batch for retry.");
|
||||
for (String domain : nullToEmpty(domains)) {
|
||||
dnsQueue.addDomainRefreshTask(domain);
|
||||
}
|
||||
|
@ -158,14 +158,14 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
// LockIndex should always be within [1, numPublishLocks]
|
||||
if (lockIndex > numPublishLocks || lockIndex <= 0) {
|
||||
logger.atSevere().log(
|
||||
"Lock index should be within [1,%d], got %d instead", numPublishLocks, lockIndex);
|
||||
"Lock index should be within [1,%d], got %d instead.", numPublishLocks, lockIndex);
|
||||
return false;
|
||||
}
|
||||
// Check if the Registry object's num locks has changed since this task was batched
|
||||
int registryNumPublishLocks = Registry.get(tld).getNumDnsPublishLocks();
|
||||
if (registryNumPublishLocks != numPublishLocks) {
|
||||
logger.atWarning().log(
|
||||
"Registry numDnsPublishLocks %d out of sync with parameter %d",
|
||||
"Registry numDnsPublishLocks %d out of sync with parameter %d.",
|
||||
registryNumPublishLocks, numPublishLocks);
|
||||
return false;
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
DnsWriter writer = dnsWriterProxy.getByClassNameForTld(dnsWriter, tld);
|
||||
|
||||
if (writer == null) {
|
||||
logger.atWarning().log("Couldn't get writer %s for TLD %s", dnsWriter, tld);
|
||||
logger.atWarning().log("Couldn't get writer %s for TLD %s.", dnsWriter, tld);
|
||||
recordActionResult(ActionStatus.BAD_WRITER);
|
||||
requeueBatch();
|
||||
return;
|
||||
|
@ -190,11 +190,11 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
for (String domain : nullToEmpty(domains)) {
|
||||
if (!DomainNameUtils.isUnder(
|
||||
InternetDomainName.from(domain), InternetDomainName.from(tld))) {
|
||||
logger.atSevere().log("%s: skipping domain %s not under tld", tld, domain);
|
||||
logger.atSevere().log("%s: skipping domain %s not under TLD.", tld, domain);
|
||||
domainsRejected += 1;
|
||||
} else {
|
||||
writer.publishDomain(domain);
|
||||
logger.atInfo().log("%s: published domain %s", tld, domain);
|
||||
logger.atInfo().log("%s: published domain %s.", tld, domain);
|
||||
domainsPublished += 1;
|
||||
}
|
||||
}
|
||||
|
@ -206,11 +206,11 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
for (String host : nullToEmpty(hosts)) {
|
||||
if (!DomainNameUtils.isUnder(
|
||||
InternetDomainName.from(host), InternetDomainName.from(tld))) {
|
||||
logger.atSevere().log("%s: skipping host %s not under tld", tld, host);
|
||||
logger.atSevere().log("%s: skipping host %s not under TLD.", tld, host);
|
||||
hostsRejected += 1;
|
||||
} else {
|
||||
writer.publishHost(host);
|
||||
logger.atInfo().log("%s: published host %s", tld, host);
|
||||
logger.atInfo().log("%s: published host %s.", tld, host);
|
||||
hostsPublished += 1;
|
||||
}
|
||||
}
|
||||
|
@ -233,7 +233,7 @@ public final class PublishDnsUpdatesAction implements Runnable, Callable<Void> {
|
|||
tld, dnsWriter, commitStatus, duration, domainsPublished, hostsPublished);
|
||||
logger.atInfo().log(
|
||||
"writer.commit() statistics: TLD: %s, dnsWriter: %s, commitStatus: %s, duration: %s, "
|
||||
+ "domainsPublished: %d, domainsRejected: %d, hostsPublished: %d, hostsRejected: %d",
|
||||
+ "domainsPublished: %d, domainsRejected: %d, hostsPublished: %d, hostsRejected: %d.",
|
||||
tld,
|
||||
dnsWriter,
|
||||
commitStatus,
|
||||
|
|
|
@ -180,11 +180,11 @@ public class CloudDnsWriter extends BaseDnsWriter {
|
|||
|
||||
desiredRecords.put(absoluteDomainName, domainRecords.build());
|
||||
logger.atFine().log(
|
||||
"Will write %d records for domain %s", domainRecords.build().size(), absoluteDomainName);
|
||||
"Will write %d records for domain '%s'.", domainRecords.build().size(), absoluteDomainName);
|
||||
}
|
||||
|
||||
private void publishSubordinateHost(String hostName) {
|
||||
logger.atInfo().log("Publishing glue records for %s", hostName);
|
||||
logger.atInfo().log("Publishing glue records for host '%s'.", hostName);
|
||||
// Canonicalize name
|
||||
String absoluteHostName = getAbsoluteHostName(hostName);
|
||||
|
||||
|
@ -250,7 +250,7 @@ public class CloudDnsWriter extends BaseDnsWriter {
|
|||
|
||||
// Host not managed by our registry, no need to update DNS.
|
||||
if (!tld.isPresent()) {
|
||||
logger.atSevere().log("publishHost called for invalid host %s", hostName);
|
||||
logger.atSevere().log("publishHost called for invalid host '%s'.", hostName);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -273,7 +273,7 @@ public class CloudDnsWriter extends BaseDnsWriter {
|
|||
ImmutableMap<String, ImmutableSet<ResourceRecordSet>> desiredRecordsCopy =
|
||||
ImmutableMap.copyOf(desiredRecords);
|
||||
retrier.callWithRetry(() -> mutateZone(desiredRecordsCopy), ZoneStateException.class);
|
||||
logger.atInfo().log("Wrote to Cloud DNS");
|
||||
logger.atInfo().log("Wrote to Cloud DNS.");
|
||||
}
|
||||
|
||||
/** Returns the glue records for in-bailiwick nameservers for the given domain+records. */
|
||||
|
@ -329,7 +329,7 @@ public class CloudDnsWriter extends BaseDnsWriter {
|
|||
*/
|
||||
private Map<String, List<ResourceRecordSet>> getResourceRecordsForDomains(
|
||||
Set<String> domainNames) {
|
||||
logger.atFine().log("Fetching records for %s", domainNames);
|
||||
logger.atFine().log("Fetching records for domain '%s'.", domainNames);
|
||||
// As per Concurrent.transform() - if numThreads or domainNames.size() < 2, it will not use
|
||||
// threading.
|
||||
return ImmutableMap.copyOf(
|
||||
|
@ -381,11 +381,11 @@ public class CloudDnsWriter extends BaseDnsWriter {
|
|||
ImmutableSet<ResourceRecordSet> intersection =
|
||||
Sets.intersection(additions, deletions).immutableCopy();
|
||||
logger.atInfo().log(
|
||||
"There are %d common items out of the %d items in 'additions' and %d items in 'deletions'",
|
||||
"There are %d common items out of the %d items in 'additions' and %d items in 'deletions'.",
|
||||
intersection.size(), additions.size(), deletions.size());
|
||||
// Exit early if we have nothing to update - dnsConnection doesn't work on empty changes
|
||||
if (additions.equals(deletions)) {
|
||||
logger.atInfo().log("Returning early because additions is the same as deletions");
|
||||
logger.atInfo().log("Returning early because additions are the same as deletions.");
|
||||
return;
|
||||
}
|
||||
Change change =
|
||||
|
|
|
@ -80,7 +80,7 @@ public class BackupDatastoreAction implements Runnable {
|
|||
logger.atInfo().log(message);
|
||||
response.setPayload(message);
|
||||
} catch (Throwable e) {
|
||||
throw new InternalServerErrorException("Exception occurred while backing up datastore.", e);
|
||||
throw new InternalServerErrorException("Exception occurred while backing up Datastore", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -118,10 +118,10 @@ public class BigqueryPollJobAction implements Runnable {
|
|||
|
||||
// Check if the job ended with an error.
|
||||
if (job.getStatus().getErrorResult() != null) {
|
||||
logger.atSevere().log("Bigquery job failed - %s - %s", jobRefString, job);
|
||||
logger.atSevere().log("Bigquery job failed - %s - %s.", jobRefString, job);
|
||||
return false;
|
||||
}
|
||||
logger.atInfo().log("Bigquery job succeeded - %s", jobRefString);
|
||||
logger.atInfo().log("Bigquery job succeeded - %s.", jobRefString);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -171,10 +171,10 @@ public class CheckBackupAction implements Runnable {
|
|||
ImmutableSet.copyOf(intersection(backup.getKinds(), kindsToLoad));
|
||||
String message = String.format("Datastore backup %s complete - ", backupName);
|
||||
if (exportedKindsToLoad.isEmpty()) {
|
||||
message += "no kinds to load into BigQuery";
|
||||
message += "no kinds to load into BigQuery.";
|
||||
} else {
|
||||
enqueueUploadBackupTask(backupId, backup.getExportFolderUrl(), exportedKindsToLoad);
|
||||
message += "BigQuery load task enqueued";
|
||||
message += "BigQuery load task enqueued.";
|
||||
}
|
||||
logger.atInfo().log(message);
|
||||
response.setPayload(message);
|
||||
|
|
|
@ -85,7 +85,7 @@ public class ExportDomainListsAction implements Runnable {
|
|||
@Override
|
||||
public void run() {
|
||||
ImmutableSet<String> realTlds = getTldsOfType(TldType.REAL);
|
||||
logger.atInfo().log("Exporting domain lists for tlds %s", realTlds);
|
||||
logger.atInfo().log("Exporting domain lists for TLDs %s.", realTlds);
|
||||
if (tm().isOfy()) {
|
||||
mrRunner
|
||||
.setJobName("Export domain lists")
|
||||
|
@ -145,7 +145,7 @@ public class ExportDomainListsAction implements Runnable {
|
|||
Registry registry = Registry.get(tld);
|
||||
if (registry.getDriveFolderId() == null) {
|
||||
logger.atInfo().log(
|
||||
"Skipping registered domains export for TLD %s because Drive folder isn't specified",
|
||||
"Skipping registered domains export for TLD %s because Drive folder isn't specified.",
|
||||
tld);
|
||||
} else {
|
||||
String resultMsg =
|
||||
|
|
|
@ -110,11 +110,11 @@ public class ExportPremiumTermsAction implements Runnable {
|
|||
private Optional<String> checkConfig(Registry registry) {
|
||||
if (isNullOrEmpty(registry.getDriveFolderId())) {
|
||||
logger.atInfo().log(
|
||||
"Skipping premium terms export for TLD %s because Drive folder isn't specified", tld);
|
||||
"Skipping premium terms export for TLD %s because Drive folder isn't specified.", tld);
|
||||
return Optional.of("Skipping export because no Drive folder is associated with this TLD");
|
||||
}
|
||||
if (!registry.getPremiumListName().isPresent()) {
|
||||
logger.atInfo().log("No premium terms to export for TLD %s", tld);
|
||||
logger.atInfo().log("No premium terms to export for TLD '%s'.", tld);
|
||||
return Optional.of("No premium lists configured");
|
||||
}
|
||||
return Optional.empty();
|
||||
|
|
|
@ -65,11 +65,11 @@ public class ExportReservedTermsAction implements Runnable {
|
|||
String resultMsg;
|
||||
if (registry.getReservedListNames().isEmpty() && isNullOrEmpty(registry.getDriveFolderId())) {
|
||||
resultMsg = "No reserved lists configured";
|
||||
logger.atInfo().log("No reserved terms to export for TLD %s", tld);
|
||||
logger.atInfo().log("No reserved terms to export for TLD '%s'.", tld);
|
||||
} else if (registry.getDriveFolderId() == null) {
|
||||
resultMsg = "Skipping export because no Drive folder is associated with this TLD";
|
||||
logger.atInfo().log(
|
||||
"Skipping reserved terms export for TLD %s because Drive folder isn't specified", tld);
|
||||
"Skipping reserved terms export for TLD %s because Drive folder isn't specified.", tld);
|
||||
} else {
|
||||
resultMsg = driveConnection.createOrUpdateFile(
|
||||
RESERVED_TERMS_FILENAME,
|
||||
|
|
|
@ -194,7 +194,7 @@ public final class SyncGroupMembersAction implements Runnable {
|
|||
}
|
||||
}
|
||||
logger.atInfo().log(
|
||||
"Successfully synced contacts for registrar %s: added %d and removed %d",
|
||||
"Successfully synced contacts for registrar %s: added %d and removed %d.",
|
||||
registrar.getRegistrarId(), totalAdded, totalRemoved);
|
||||
} catch (IOException e) {
|
||||
// Package up exception and re-throw with attached additional relevant info.
|
||||
|
|
|
@ -150,8 +150,7 @@ public class UpdateSnapshotViewAction implements Runnable {
|
|||
if (e.getDetails() != null && e.getDetails().getCode() == 404) {
|
||||
bigquery.tables().insert(ref.getProjectId(), ref.getDatasetId(), table).execute();
|
||||
} else {
|
||||
logger.atWarning().withCause(e).log(
|
||||
"UpdateSnapshotViewAction failed, caught exception %s", e.getDetails());
|
||||
logger.atWarning().withCause(e).log("UpdateSnapshotViewAction errored out.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -109,7 +109,7 @@ public class UploadDatastoreBackupAction implements Runnable {
|
|||
String message = uploadBackup(backupId, backupFolderUrl, Splitter.on(',').split(backupKinds));
|
||||
logger.atInfo().log("Loaded backup successfully: %s", message);
|
||||
} catch (Throwable e) {
|
||||
logger.atSevere().withCause(e).log("Error loading backup");
|
||||
logger.atSevere().withCause(e).log("Error loading backup.");
|
||||
if (e instanceof IllegalArgumentException) {
|
||||
throw new BadRequestException("Error calling load backup: " + e.getMessage(), e);
|
||||
} else {
|
||||
|
@ -148,12 +148,12 @@ public class UploadDatastoreBackupAction implements Runnable {
|
|||
getQueue(UpdateSnapshotViewAction.QUEUE));
|
||||
|
||||
builder.append(String.format(" - %s:%s\n", projectId, jobId));
|
||||
logger.atInfo().log("Submitted load job %s:%s", projectId, jobId);
|
||||
logger.atInfo().log("Submitted load job %s:%s.", projectId, jobId);
|
||||
}
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
static String sanitizeForBigquery(String backupId) {
|
||||
private static String sanitizeForBigquery(String backupId) {
|
||||
return backupId.replaceAll("[^a-zA-Z0-9_]", "_");
|
||||
}
|
||||
|
||||
|
|
|
@ -117,7 +117,7 @@ class SheetSynchronizer {
|
|||
BatchUpdateValuesResponse response =
|
||||
sheetsService.spreadsheets().values().batchUpdate(spreadsheetId, updateRequest).execute();
|
||||
Integer cellsUpdated = response.getTotalUpdatedCells();
|
||||
logger.atInfo().log("Updated %d originalVals", cellsUpdated != null ? cellsUpdated : 0);
|
||||
logger.atInfo().log("Updated %d originalVals.", cellsUpdated != null ? cellsUpdated : 0);
|
||||
}
|
||||
|
||||
// Append extra rows if necessary
|
||||
|
@ -140,7 +140,7 @@ class SheetSynchronizer {
|
|||
.setInsertDataOption("INSERT_ROWS")
|
||||
.execute();
|
||||
logger.atInfo().log(
|
||||
"Appended %d rows to range %s",
|
||||
"Appended %d rows to range %s.",
|
||||
data.size() - originalVals.size(), appendResponse.getTableRange());
|
||||
// Clear the extra rows if necessary
|
||||
} else if (data.size() < originalVals.size()) {
|
||||
|
@ -155,7 +155,7 @@ class SheetSynchronizer {
|
|||
new ClearValuesRequest())
|
||||
.execute();
|
||||
logger.atInfo().log(
|
||||
"Cleared %d rows from range %s",
|
||||
"Cleared %d rows from range %s.",
|
||||
originalVals.size() - data.size(), clearResponse.getClearedRange());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -150,7 +150,7 @@ public class CheckApiAction implements Runnable {
|
|||
return fail(e.getResult().getMsg());
|
||||
} catch (Exception e) {
|
||||
metricBuilder.status(UNKNOWN_ERROR);
|
||||
logger.atWarning().withCause(e).log("Unknown error");
|
||||
logger.atWarning().withCause(e).log("Unknown error.");
|
||||
return fail("Invalid request");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -130,12 +130,12 @@ public final class EppController {
|
|||
} catch (EppException | EppExceptionInProviderException e) {
|
||||
// The command failed. Send the client an error message, but only log at INFO since many of
|
||||
// these failures are innocuous or due to client error, so there's nothing we have to change.
|
||||
logger.atInfo().withCause(e).log("Flow returned failure response");
|
||||
logger.atInfo().withCause(e).log("Flow returned failure response.");
|
||||
EppException eppEx = (EppException) (e instanceof EppException ? e : e.getCause());
|
||||
return getErrorResponse(eppEx.getResult(), flowComponent.trid());
|
||||
} catch (Throwable e) {
|
||||
// Something bad and unexpected happened. Send the client a generic error, and log at SEVERE.
|
||||
logger.atSevere().withCause(e).log("Unexpected failure in flow execution");
|
||||
logger.atSevere().withCause(e).log("Unexpected failure in flow execution.");
|
||||
return getErrorResponse(Result.create(Code.COMMAND_FAILED), flowComponent.trid());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -84,7 +84,7 @@ public class EppRequestHandler {
|
|||
response.setHeader(ProxyHttpHeaders.LOGGED_IN, "true");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.atWarning().withCause(e).log("handleEppCommand general exception");
|
||||
logger.atWarning().withCause(e).log("handleEppCommand general exception.");
|
||||
response.setStatus(SC_BAD_REQUEST);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -100,7 +100,7 @@ public final class ExtensionManager {
|
|||
throw new UndeclaredServiceExtensionException(undeclaredUrisThatError);
|
||||
}
|
||||
logger.atInfo().log(
|
||||
"Client %s is attempting to run %s without declaring URIs %s on login",
|
||||
"Client %s is attempting to run %s without declaring URIs %s on login.",
|
||||
registrarId, flowClass.getSimpleName(), undeclaredUris);
|
||||
}
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ public class LoginFlow implements Flow {
|
|||
}
|
||||
|
||||
/** Run the flow without bothering to log errors. The {@link #run} method will do that for us. */
|
||||
public final EppResponse runWithoutLogging() throws EppException {
|
||||
private final EppResponse runWithoutLogging() throws EppException {
|
||||
extensionManager.validate(); // There are no legal extensions for this flow.
|
||||
Login login = (Login) eppInput.getCommandWrapper().getCommand();
|
||||
if (!registrarId.isEmpty()) {
|
||||
|
|
|
@ -141,7 +141,7 @@ public class GcsUtils implements Serializable {
|
|||
Blob blob = storage().get(blobId);
|
||||
return blob != null && blob.getSize() > 0;
|
||||
} catch (StorageException e) {
|
||||
logger.atWarning().withCause(e).log("Failed to check if GCS file exists");
|
||||
logger.atWarning().withCause(e).log("Failure while checking if GCS file exists.");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -256,7 +256,7 @@ public class LoadTestAction implements Runnable {
|
|||
}
|
||||
ImmutableList<TaskOptions> taskOptions = tasks.build();
|
||||
enqueue(taskOptions);
|
||||
logger.atInfo().log("Added %d total load test tasks", taskOptions.size());
|
||||
logger.atInfo().log("Added %d total load test tasks.", taskOptions.size());
|
||||
}
|
||||
|
||||
private void validateAndLogRequest() {
|
||||
|
|
|
@ -58,7 +58,7 @@ public class UnlockerOutput<O> extends Output<O, Lock> {
|
|||
|
||||
@Override
|
||||
public Lock finish(Collection<? extends OutputWriter<O>> writers) {
|
||||
logger.atInfo().log("Mapreduce finished; releasing lock: %s", lock);
|
||||
logger.atInfo().log("Mapreduce finished; releasing lock '%s'.", lock);
|
||||
lock.release();
|
||||
return lock;
|
||||
}
|
||||
|
|
|
@ -64,7 +64,7 @@ class EppResourceEntityReader<R extends EppResource> extends EppResourceBaseRead
|
|||
Key<? extends EppResource> key = nextQueryResult().getKey();
|
||||
EppResource resource = auditedOfy().load().key(key).now();
|
||||
if (resource == null) {
|
||||
logger.atSevere().log("EppResourceIndex key %s points at a missing resource", key);
|
||||
logger.atSevere().log("EppResourceIndex key %s points at a missing resource.", key);
|
||||
continue;
|
||||
}
|
||||
// Postfilter to distinguish polymorphic types (e.g. EppResources).
|
||||
|
|
|
@ -283,7 +283,7 @@ public class Ofy {
|
|||
}
|
||||
sleeper.sleepUninterruptibly(Duration.millis(sleepMillis));
|
||||
logger.atInfo().withCause(e).log(
|
||||
"Retrying %s, attempt %d", e.getClass().getSimpleName(), attempt);
|
||||
"Retrying %s, attempt %d.", e.getClass().getSimpleName(), attempt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ public class ReplicateToDatastoreAction implements Runnable {
|
|||
*/
|
||||
@VisibleForTesting
|
||||
public void applyTransaction(TransactionEntity txnEntity) {
|
||||
logger.atInfo().log("Applying a single transaction Cloud SQL -> Cloud Datastore");
|
||||
logger.atInfo().log("Applying a single transaction Cloud SQL -> Cloud Datastore.");
|
||||
try (UpdateAutoTimestamp.DisableAutoUpdateResource disabler =
|
||||
UpdateAutoTimestamp.disableAutoUpdate()) {
|
||||
ofyTm()
|
||||
|
@ -136,21 +136,21 @@ public class ReplicateToDatastoreAction implements Runnable {
|
|||
}
|
||||
|
||||
logger.atInfo().log(
|
||||
"Applying transaction %s to Cloud Datastore", txnEntity.getId());
|
||||
"Applying transaction %s to Cloud Datastore.", txnEntity.getId());
|
||||
|
||||
// At this point, we know txnEntity is the correct next transaction, so write it
|
||||
// to datastore.
|
||||
// to Datastore.
|
||||
try {
|
||||
Transaction.deserialize(txnEntity.getContents()).writeToDatastore();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException("Error during transaction deserialization.", e);
|
||||
throw new RuntimeException("Error during transaction deserialization", e);
|
||||
}
|
||||
|
||||
// Write the updated last transaction id to datastore as part of this datastore
|
||||
// Write the updated last transaction id to Datastore as part of this Datastore
|
||||
// transaction.
|
||||
auditedOfy().save().entity(lastSqlTxn.cloneWithNewTransactionId(nextTxnId));
|
||||
logger.atInfo().log(
|
||||
"Finished applying single transaction Cloud SQL -> Cloud Datastore");
|
||||
"Finished applying single transaction Cloud SQL -> Cloud Datastore.");
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -180,16 +180,16 @@ public class ReplicateToDatastoreAction implements Runnable {
|
|||
return;
|
||||
}
|
||||
try {
|
||||
logger.atInfo().log("Processing transaction replay batch Cloud SQL -> Cloud Datastore");
|
||||
logger.atInfo().log("Processing transaction replay batch Cloud SQL -> Cloud Datastore.");
|
||||
int numTransactionsReplayed = replayAllTransactions();
|
||||
String resultMessage =
|
||||
String.format(
|
||||
"Replayed %d transaction(s) from Cloud SQL -> Datastore", numTransactionsReplayed);
|
||||
"Replayed %d transaction(s) from Cloud SQL -> Datastore.", numTransactionsReplayed);
|
||||
logger.atInfo().log(resultMessage);
|
||||
response.setPayload(resultMessage);
|
||||
response.setStatus(SC_OK);
|
||||
} catch (Throwable t) {
|
||||
String message = "Errored out replaying files";
|
||||
String message = "Errored out replaying files.";
|
||||
logger.atSevere().withCause(t).log(message);
|
||||
response.setStatus(SC_INTERNAL_SERVER_ERROR);
|
||||
response.setPayload(message);
|
||||
|
|
|
@ -30,10 +30,10 @@ public class ReservedListDao {
|
|||
/** Persist a new reserved list to Cloud SQL. */
|
||||
public static void save(ReservedList reservedList) {
|
||||
checkArgumentNotNull(reservedList, "Must specify reservedList");
|
||||
logger.atInfo().log("Saving reserved list %s to Cloud SQL", reservedList.getName());
|
||||
logger.atInfo().log("Saving reserved list %s to Cloud SQL.", reservedList.getName());
|
||||
jpaTm().transact(() -> jpaTm().insert(reservedList));
|
||||
logger.atInfo().log(
|
||||
"Saved reserved list %s with %d entries to Cloud SQL",
|
||||
"Saved reserved list %s with %d entries to Cloud SQL.",
|
||||
reservedList.getName(), reservedList.getReservedListEntries().size());
|
||||
}
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ public class ServletBase extends HttpServlet {
|
|||
// registered if metric reporter starts up correctly.
|
||||
try {
|
||||
metricReporter.get().startAsync().awaitRunning(java.time.Duration.ofSeconds(10));
|
||||
logger.atInfo().log("Started up MetricReporter");
|
||||
logger.atInfo().log("Started up MetricReporter.");
|
||||
LifecycleManager.getInstance()
|
||||
.setShutdownHook(
|
||||
() -> {
|
||||
|
@ -60,7 +60,7 @@ public class ServletBase extends HttpServlet {
|
|||
.get()
|
||||
.stopAsync()
|
||||
.awaitTerminated(java.time.Duration.ofSeconds(10));
|
||||
logger.atInfo().log("Shut down MetricReporter");
|
||||
logger.atInfo().log("Shut down MetricReporter.");
|
||||
} catch (TimeoutException e) {
|
||||
logger.atSevere().withCause(e).log("Failed to stop MetricReporter.");
|
||||
}
|
||||
|
@ -72,13 +72,13 @@ public class ServletBase extends HttpServlet {
|
|||
|
||||
@Override
|
||||
public void service(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
|
||||
logger.atInfo().log("Received %s request", getClass().getSimpleName());
|
||||
logger.atInfo().log("Received %s request.", getClass().getSimpleName());
|
||||
DateTime startTime = clock.nowUtc();
|
||||
try {
|
||||
requestHandler.handleRequest(req, rsp);
|
||||
} finally {
|
||||
logger.atInfo().log(
|
||||
"Finished %s request. Latency: %.3fs",
|
||||
"Finished %s request. Latency: %.3fs.",
|
||||
getClass().getSimpleName(), (clock.nowUtc().getMillis() - startTime.getMillis()) / 1000d);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -303,7 +303,7 @@ public abstract class PersistenceModule {
|
|||
overrides.put(Environment.PASS, credential.password());
|
||||
} catch (Throwable e) {
|
||||
// TODO(b/184631990): after SQL becomes primary, throw an exception to fail fast
|
||||
logger.atSevere().withCause(e).log("Failed to get SQL credential from Secret Manager");
|
||||
logger.atSevere().withCause(e).log("Failed to get SQL credential from Secret Manager.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -186,10 +186,10 @@ public class JpaTransactionManagerImpl implements JpaTransactionManager {
|
|||
// Error is unchecked!
|
||||
try {
|
||||
txn.rollback();
|
||||
logger.atWarning().log("Error during transaction; transaction rolled back");
|
||||
logger.atWarning().log("Error during transaction; transaction rolled back.");
|
||||
} catch (Throwable rollbackException) {
|
||||
logger.atSevere().withCause(rollbackException).log(
|
||||
"Rollback failed; suppressing error");
|
||||
"Rollback failed; suppressing error.");
|
||||
}
|
||||
throw e;
|
||||
} finally {
|
||||
|
@ -218,9 +218,9 @@ public class JpaTransactionManagerImpl implements JpaTransactionManager {
|
|||
// Error is unchecked!
|
||||
try {
|
||||
txn.rollback();
|
||||
logger.atWarning().log("Error during transaction; transaction rolled back");
|
||||
logger.atWarning().log("Error during transaction; transaction rolled back.");
|
||||
} catch (Throwable rollbackException) {
|
||||
logger.atSevere().withCause(rollbackException).log("Rollback failed; suppressing error");
|
||||
logger.atSevere().withCause(rollbackException).log("Rollback failed; suppressing error.");
|
||||
}
|
||||
throw e;
|
||||
} finally {
|
||||
|
|
|
@ -139,7 +139,7 @@ public abstract class RdapActionBase implements Runnable {
|
|||
pathProper.startsWith(getActionPath()),
|
||||
"%s doesn't start with %s", pathProper, getActionPath());
|
||||
String pathSearchString = pathProper.substring(getActionPath().length());
|
||||
logger.atInfo().log("path search string: '%s'", pathSearchString);
|
||||
logger.atInfo().log("path search string: '%s'.", pathSearchString);
|
||||
|
||||
ReplyPayloadBase replyObject =
|
||||
getJsonObjectForResource(pathSearchString, requestMethod == Action.Method.HEAD);
|
||||
|
@ -153,14 +153,14 @@ public abstract class RdapActionBase implements Runnable {
|
|||
setPayload(replyObject);
|
||||
metricInformationBuilder.setStatusCode(SC_OK);
|
||||
} catch (HttpException e) {
|
||||
logger.atInfo().withCause(e).log("Error in RDAP");
|
||||
logger.atInfo().withCause(e).log("Error in RDAP.");
|
||||
setError(e.getResponseCode(), e.getResponseCodeString(), e.getMessage());
|
||||
} catch (URISyntaxException | IllegalArgumentException e) {
|
||||
logger.atInfo().withCause(e).log("Bad request in RDAP");
|
||||
logger.atInfo().withCause(e).log("Bad request in RDAP.");
|
||||
setError(SC_BAD_REQUEST, "Bad Request", "Not a valid " + getHumanReadableObjectTypeName());
|
||||
} catch (RuntimeException e) {
|
||||
setError(SC_INTERNAL_SERVER_ERROR, "Internal Server Error", "An error was encountered");
|
||||
logger.atSevere().withCause(e).log("Exception encountered while processing RDAP command");
|
||||
logger.atSevere().withCause(e).log("Exception encountered while processing RDAP command.");
|
||||
}
|
||||
rdapMetrics.updateMetrics(metricInformationBuilder.build());
|
||||
}
|
||||
|
|
|
@ -448,7 +448,7 @@ public class RdapDomainSearchAction extends RdapSearchActionBase {
|
|||
if (hostKey != null) {
|
||||
builder.add(hostKey);
|
||||
} else {
|
||||
logger.atWarning().log("Host key unexpectedly null");
|
||||
logger.atWarning().log("Host key unexpectedly null.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -332,7 +332,7 @@ public class RdapJsonFormatter {
|
|||
builder.statusBuilder().addAll(status);
|
||||
if (status.isEmpty()) {
|
||||
logger.atWarning().log(
|
||||
"Domain %s (ROID %s) doesn't have any status",
|
||||
"Domain %s (ROID %s) doesn't have any status.",
|
||||
domainBase.getDomainName(), domainBase.getRepoId());
|
||||
}
|
||||
// RDAP Response Profile 2.6.3, must have a notice about statuses. That is in {@link
|
||||
|
@ -741,7 +741,7 @@ public class RdapJsonFormatter {
|
|||
if (registrarContacts.stream()
|
||||
.noneMatch(contact -> contact.roles().contains(RdapEntity.Role.ABUSE))) {
|
||||
logger.atWarning().log(
|
||||
"Registrar '%s' (IANA ID %s) is missing ABUSE contact",
|
||||
"Registrar '%s' (IANA ID %s) is missing ABUSE contact.",
|
||||
registrar.getRegistrarId(), registrar.getIanaIdentifier());
|
||||
}
|
||||
builder.entitiesBuilder().addAll(registrarContacts);
|
||||
|
|
|
@ -92,7 +92,7 @@ public final class UpdateRegistrarRdapBaseUrlsAction implements Runnable {
|
|||
UpdateRegistrarRdapBaseUrlsAction() {}
|
||||
|
||||
private String loginAndGetId(HttpRequestFactory requestFactory, String tld) throws IOException {
|
||||
logger.atInfo().log("Logging in to MoSAPI");
|
||||
logger.atInfo().log("Logging in to MoSAPI.");
|
||||
HttpRequest request =
|
||||
requestFactory.buildGetRequest(new GenericUrl(String.format(LOGIN_URL, tld)));
|
||||
request.getHeaders().setBasicAuthentication(String.format("%s_ry", tld), password);
|
||||
|
@ -176,15 +176,14 @@ public final class UpdateRegistrarRdapBaseUrlsAction implements Runnable {
} catch (Throwable e) {
// Login failures are bad but not unexpected for certain TLDs. We shouldn't store those
// but rather should only store useful Throwables.
logger.atWarning().log("Error logging in to MoSAPI server: " + e.getMessage());
logger.atWarning().withCause(e).log("Error logging in to MoSAPI server.");
continue;
}
try {
return getRdapBaseUrlsPerIanaIdWithTld(tld, id, requestFactory);
} catch (Throwable throwable) {
logger.atWarning().log(
String.format(
"Error retrieving RDAP urls with TLD %s: %s", tld, throwable.getMessage()));
logger.atWarning().withCause(throwable).log(
"Error retrieving RDAP URLs for TLD '%s'.", tld);
finalThrowable = throwable;
}
}
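The hunk above makes a substantive fix beyond punctuation: instead of flattening the exception into the message with getMessage() or String.format(), the Throwable itself is attached via withCause(), which preserves the full stack trace in the log entry. A hedged sketch of the difference (connect() and the message are illustrative):

import com.google.common.flogger.FluentLogger;
import java.io.IOException;

class CauseLogExample {
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  void attempt() {
    try {
      connect(); // Hypothetical operation that may fail.
    } catch (IOException e) {
      // Before: the stack trace is lost; only the message text survives.
      logger.atWarning().log("Error logging in to server: " + e.getMessage());
      // After: withCause() attaches the Throwable, so log viewers show the full cause chain.
      logger.atWarning().withCause(e).log("Error logging in to server.");
    }
  }

  private void connect() throws IOException {}
}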
@ -215,7 +214,7 @@ public final class UpdateRegistrarRdapBaseUrlsAction implements Runnable {
// If this registrar already has these values, skip it
if (registrar.getRdapBaseUrls().equals(baseUrls)) {
logger.atInfo().log(
"No change in RdapBaseUrls for registrar %s (ianaId %s)",
"No change in RdapBaseUrls for registrar %s (ianaId %s).",
registrar.getRegistrarId(), ianaId);
return;
}

@ -92,7 +92,7 @@ public final class BrdaCopyAction implements Runnable {

long xmlLength = readXmlLength(xmlLengthFilename);

logger.atInfo().log("Writing %s and %s", rydeFile, sigFile);
logger.atInfo().log("Writing files '%s' and '%s'.", rydeFile, sigFile);
try (InputStream gcsInput = gcsUtils.openInputStream(xmlFilename);
InputStream ghostrydeDecoder = Ghostryde.decoder(gcsInput, stagingDecryptionKey);
OutputStream rydeOut = gcsUtils.openOutputStream(rydeFile);

@ -88,7 +88,7 @@ class EscrowTaskRunner {
final Duration interval) {
Callable<Void> lockRunner =
() -> {
logger.atInfo().log("TLD: %s", registry.getTld());
logger.atInfo().log("Performing escrow for TLD '%s'.", registry.getTld());
DateTime startOfToday = clock.nowUtc().withTimeAtStartOfDay();
DateTime nextRequiredRun =
transactIfJpaTm(

@ -100,7 +100,7 @@ class EscrowTaskRunner {
if (nextRequiredRun.isAfter(startOfToday)) {
throw new NoContentException("Already completed");
}
logger.atInfo().log("Current cursor is: %s", nextRequiredRun);
logger.atInfo().log("Current cursor is: %s.", nextRequiredRun);
task.runWithLock(nextRequiredRun);
DateTime nextRun = nextRequiredRun.plus(interval);
logger.atInfo().log("Rolling cursor forward to %s.", nextRun);

@ -225,7 +225,7 @@ public final class RdeStagingAction implements Runnable {
ImmutableSetMultimap<String, PendingDeposit> pendings =
manual ? getManualPendingDeposits() : getStandardPendingDeposits();
if (pendings.isEmpty()) {
String message = "Nothing needs to be deposited";
String message = "Nothing needs to be deposited.";
logger.atInfo().log(message);
response.setStatus(SC_NO_CONTENT);
response.setPayload(message);
@ -99,12 +99,12 @@ public final class RdeStagingReducer extends Reducer<PendingDeposit, DepositFrag
};
String lockName = String.format("RdeStaging %s", key.mode());
if (!lockHandler.executeWithLocks(lockRunner, key.tld(), lockTimeout, lockName)) {
logger.atWarning().log("Lock in use: %s", lockName);
logger.atWarning().log("Lock '%s' in use.", lockName);
}
}

private void reduceWithLock(final PendingDeposit key, Iterator<DepositFragment> fragments) {
logger.atInfo().log("RdeStagingReducer %s", key);
logger.atInfo().log("RdeStagingReducer %s.", key);

// Normally this is done by BackendServlet but it's not present in MapReduceServlet.
Security.addProvider(new BouncyCastleProvider());

@ -140,8 +140,7 @@ public final class RdeStagingReducer extends Reducer<PendingDeposit, DepositFrag
XjcRdeHeader header;

// Write a gigantic XML file to GCS. We'll start by opening encrypted out/err file handles.

logger.atInfo().log("Writing %s and %s", xmlFilename, xmlLengthFilename);
logger.atInfo().log("Writing files '%s' and '%s'.", xmlFilename, xmlLengthFilename);
try (OutputStream gcsOutput = gcsUtils.openOutputStream(xmlFilename);
OutputStream lengthOutput = gcsUtils.openOutputStream(xmlLengthFilename);
OutputStream ghostrydeEncoder = Ghostryde.encoder(gcsOutput, stagingKey, lengthOutput);

@ -189,7 +188,7 @@ public final class RdeStagingReducer extends Reducer<PendingDeposit, DepositFrag
//
// This will be sent to ICANN once we're done uploading the big XML to the escrow provider.
if (mode == RdeMode.FULL) {
logger.atInfo().log("Writing %s", reportFilename);
logger.atInfo().log("Writing report file '%s'.", reportFilename);
try (OutputStream gcsOutput = gcsUtils.openOutputStream(reportFilename);
OutputStream ghostrydeEncoder = Ghostryde.encoder(gcsOutput, stagingKey)) {
counter.makeReport(id, watermark, header, revision).marshal(ghostrydeEncoder, UTF_8);

@ -200,7 +199,7 @@ public final class RdeStagingReducer extends Reducer<PendingDeposit, DepositFrag

// Now that we're done, kick off RdeUploadAction and roll the cursor forward.
if (key.manual()) {
logger.atInfo().log("Manual operation; not advancing cursor or enqueuing upload task");
logger.atInfo().log("Manual operation; not advancing cursor or enqueuing upload task.");
return;
}
tm().transact(

@ -225,7 +224,7 @@ public final class RdeStagingReducer extends Reducer<PendingDeposit, DepositFrag
key);
tm().put(Cursor.create(key.cursor(), newPosition, registry));
logger.atInfo().log(
"Rolled forward %s on %s cursor to %s", key.cursor(), tld, newPosition);
"Rolled forward %s on %s cursor to %s.", key.cursor(), tld, newPosition);
RdeRevision.saveRevision(tld, watermark, mode, revision);
if (mode == RdeMode.FULL) {
taskQueueUtils.enqueue(
@ -231,13 +231,13 @@ public final class RdeUploadAction implements Runnable, EscrowTask {
.setFileMetadata(name, xmlLength, watermark)
.build()) {
long bytesCopied = ByteStreams.copy(ghostrydeDecoder, rydeEncoder);
logger.atInfo().log("uploaded %,d bytes: %s", bytesCopied, rydeFilename);
logger.atInfo().log("Uploaded %,d bytes to path '%s'.", bytesCopied, rydeFilename);
}
String sigFilename = name + ".sig";
byte[] signature = sigOut.toByteArray();
gcsUtils.createFromBytes(BlobId.of(bucket, sigFilename), signature);
ftpChan.get().put(new ByteArrayInputStream(signature), sigFilename);
logger.atInfo().log("uploaded %,d bytes: %s", signature.length, sigFilename);
logger.atInfo().log("Uploaded %,d bytes to path '%s'.", signature.length, sigFilename);
}
}
}

@ -91,7 +91,7 @@ public final class CopyDetailReportsAction implements Runnable {
.filter(objectName -> objectName.startsWith(BillingModule.DETAIL_REPORT_PREFIX))
.collect(ImmutableList.toImmutableList());
} catch (IOException e) {
logger.atSevere().withCause(e).log("Copying registrar detail report failed");
logger.atSevere().withCause(e).log("Copying registrar detail report failed.");
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setContentType(MediaType.PLAIN_TEXT_UTF_8);
response.setPayload(String.format("Failure, encountered %s", e.getMessage()));

@ -106,12 +106,12 @@ public final class CopyDetailReportsAction implements Runnable {
Optional<Registrar> registrar = Registrar.loadByRegistrarId(registrarId);
if (!registrar.isPresent()) {
logger.atWarning().log(
"Registrar %s not found in database for file %s", registrar, detailReportName);
"Registrar %s not found in database for file '%s'.", registrar, detailReportName);
continue;
}
String driveFolderId = registrar.get().getDriveFolderId();
if (driveFolderId == null) {
logger.atWarning().log("Drive folder id not found for registrar %s", registrarId);
logger.atWarning().log("Drive folder id not found for registrar '%s'.", registrarId);
continue;
}
// Attempt to copy each detail report to its associated registrar's drive folder.
@ -111,7 +111,7 @@ public class GenerateInvoicesAction implements Runnable {
@Override
public void run() {
response.setContentType(MediaType.PLAIN_TEXT_UTF_8);
logger.atInfo().log("Launching invoicing pipeline for %s", yearMonth);
logger.atInfo().log("Launching invoicing pipeline for %s.", yearMonth);
try {
LaunchFlexTemplateParameter parameter =
new LaunchFlexTemplateParameter()

@ -152,7 +152,7 @@ public class GenerateInvoicesAction implements Runnable {
response.setStatus(SC_OK);
response.setPayload(String.format("Launched invoicing pipeline: %s", jobId));
} catch (IOException e) {
logger.atWarning().withCause(e).log("Pipeline Launch failed");
logger.atWarning().withCause(e).log("Template Launch failed.");
emailUtils.sendAlertEmail(String.format("Pipeline Launch failed due to %s", e.getMessage()));
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setPayload(String.format("Pipeline launch failed: %s", e.getMessage()));

@ -86,7 +86,8 @@ public class IcannHttpReporter {

HttpResponse response = null;
logger.atInfo().log(
"Sending report to %s with content length %d", uploadUrl, request.getContent().getLength());
"Sending report to %s with content length %d.",
uploadUrl, request.getContent().getLength());
boolean success = true;
try {
response = request.execute();
@ -72,7 +72,7 @@ public class IcannReportingStager {
/**
* Creates and stores reports of a given type on GCS.
*
* <p>This is factored out to facilitate choosing which reports to upload,
* <p>This is factored out to facilitate choosing which reports to upload.
*/
ImmutableList<String> stageReports(YearMonth yearMonth, String subdir, ReportType reportType)
throws Exception {

@ -90,11 +90,11 @@ public class IcannReportingStager {
createIntermediaryTableView(entry.getKey(), entry.getValue(), reportType);
}

// Get an in-memory table of the aggregate query's result
// Get an in-memory table of the aggregate query's result.
ImmutableTable<Integer, TableFieldSchema, Object> reportTable =
bigquery.queryToLocalTableSync(queryBuilder.getReportQuery(yearMonth));

// Get report headers from the table schema and convert into CSV format
// Get report headers from the table schema and convert into CSV format.
String headerRow = constructRow(getHeaders(reportTable.columnKeySet()));

return (reportType == ReportType.ACTIVITY)

@ -104,8 +104,8 @@ public class IcannReportingStager {

private void createIntermediaryTableView(String queryName, String query, ReportType reportType)
throws ExecutionException, InterruptedException {
// Later views depend on the results of earlier ones, so query everything synchronously
logger.atInfo().log("Generating intermediary view %s", queryName);
// Later views depend on the results of earlier ones, so query everything synchronously.
logger.atInfo().log("Generating intermediary view %s.", queryName);
bigquery
.startQuery(
query,

@ -262,7 +262,7 @@ public class IcannReportingStager {
final BlobId gcsFilename =
BlobId.of(reportingBucket, String.format("%s/%s", subdir, reportFilename));
gcsUtils.createFromBytes(gcsFilename, reportBytes);
logger.atInfo().log("Wrote %d bytes to file location %s", reportBytes.length, gcsFilename);
logger.atInfo().log("Wrote %d bytes to file location '%s'.", reportBytes.length, gcsFilename);
return reportFilename;
}

@ -273,6 +273,6 @@ public class IcannReportingStager {
StringBuilder manifestString = new StringBuilder();
filenames.forEach((filename) -> manifestString.append(filename).append("\n"));
gcsUtils.createFromBytes(gcsFilename, manifestString.toString().getBytes(UTF_8));
logger.atInfo().log("Wrote %d filenames to manifest at %s", filenames.size(), gcsFilename);
logger.atInfo().log("Wrote %d filenames to manifest at '%s'.", filenames.size(), gcsFilename);
}
}
@ -118,7 +118,7 @@ public final class IcannReportingStagingAction implements Runnable {
response.setContentType(MediaType.PLAIN_TEXT_UTF_8);
response.setPayload("Completed staging action.");

logger.atInfo().log("Enqueueing report upload :");
logger.atInfo().log("Enqueueing report upload.");
TaskOptions uploadTask =
TaskOptions.Builder.withUrl(IcannReportingUploadAction.PATH)
.method(Method.POST)

@ -148,7 +148,7 @@ public final class IcannReportingUploadAction implements Runnable {
String filename = getFileName(cursorType, cursorTime, tldStr);
final BlobId gcsFilename =
BlobId.of(reportingBucket, String.format("%s/%s", reportSubdir, filename));
logger.atInfo().log("Reading ICANN report %s from bucket %s", filename, reportingBucket);
logger.atInfo().log("Reading ICANN report %s from bucket '%s'.", filename, reportingBucket);
// Check that the report exists
try {
verifyFileExists(gcsFilename);

@ -177,7 +177,7 @@ public final class IcannReportingUploadAction implements Runnable {
},
IcannReportingUploadAction::isUploadFailureRetryable);
} catch (RuntimeException e) {
logger.atWarning().withCause(e).log("Upload to %s failed", gcsFilename);
logger.atWarning().withCause(e).log("Upload to %s failed.", gcsFilename);
}
reportSummaryBuilder.put(filename, success);
@ -140,7 +140,7 @@ public class GenerateSpec11ReportAction implements Runnable {
response.setStatus(SC_OK);
response.setPayload(String.format("Launched Spec11 pipeline: %s", jobId));
} catch (IOException e) {
logger.atWarning().withCause(e).log("Pipeline Launch failed");
logger.atWarning().withCause(e).log("Template Launch failed.");
response.setStatus(SC_INTERNAL_SERVER_ERROR);
response.setPayload(String.format("Pipeline launch failed: %s", e.getMessage()));
}

@ -117,7 +117,7 @@ public class Spec11EmailUtils {
for (int i = 1; i < failedMatches.size(); i++) {
logger.atSevere().withCause(failedMatchesList.get(i).getValue()).log(
"Additional exception thrown when sending email to registrar %s, in addition to the"
+ " re-thrown exception",
+ " re-thrown exception.",
failedMatchesList.get(i).getKey().clientId());
}
throw new RuntimeException(

@ -85,7 +85,7 @@ public class Spec11RegistrarThreatMatchesParser {
if (gcsUtils.existsAndNotEmpty(gcsFilename)) {
return Optional.of(yesterday);
}
logger.atWarning().log("Could not find previous file from date %s", yesterday);
logger.atWarning().log("Could not find previous file from date %s.", yesterday);

for (LocalDate dateToCheck = yesterday.minusDays(1);
!dateToCheck.isBefore(date.minusMonths(1));
@ -60,7 +60,7 @@ class RequestMetrics {
String.valueOf(authLevel),
String.valueOf(success));
logger.atInfo().log(
"Action called for path=%s, method=%s, authLevel=%s, success=%s. Took: %.3fs",
"Action called for path=%s, method=%s, authLevel=%s, success=%s. Took: %.3fs.",
path, method, authLevel, success, duration.getMillis() / 1000d);
}
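One detail worth noting in the hunk above: the elapsed time is a Joda-Time Duration, and %.3f expects a double, so the millisecond count is divided by 1000d to force floating-point division. A minimal sketch of the same idiom (the class and surrounding values are illustrative):

import com.google.common.flogger.FluentLogger;
import org.joda.time.Duration;

class DurationLogExample {
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  void report(Duration duration) {
    // A 1234 ms duration logs as "Took: 1.234s." — dividing by 1000d avoids integer division.
    logger.atInfo().log("Took: %.3fs.", duration.getMillis() / 1000d);
  }
}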
@ -275,7 +275,7 @@ public class AuthenticatedRegistrarAccessor {
} catch (RuntimeException e) {
logger.atSevere().withCause(e).log(
"Error checking whether email %s belongs to support group %s."
+ " Skipping support role check",
+ " Skipping support role check.",
userEmail, gSuiteSupportGroupEmailAddress);
return false;
}

@ -316,7 +316,7 @@ public class AuthenticatedRegistrarAccessor {
UserAuthInfo userAuthInfo = authResult.userAuthInfo().get();
User user = userAuthInfo.user();
ImmutableSetMultimap.Builder<String, Role> builder = new ImmutableSetMultimap.Builder<>();
logger.atInfo().log("Checking registrar contacts for user ID %s", user.getUserId());
logger.atInfo().log("Checking registrar contacts for user ID %s.", user.getUserId());

// Find all registrars that have a registrar contact with this user's ID.
if (tm().isOfy()) {

@ -74,7 +74,7 @@ public class OAuthAuthenticationMechanism implements AuthenticationMechanism {
String header = request.getHeader(AUTHORIZATION);
if ((header == null) || !header.startsWith(BEARER_PREFIX)) {
if (header != null) {
logger.atInfo().log("invalid authorization header");
logger.atInfo().log("Invalid authorization header.");
}
return AuthResult.create(NONE);
}

@ -94,14 +94,14 @@ public class OAuthAuthenticationMechanism implements AuthenticationMechanism {
currentUser = oauthService.getCurrentUser(availableOauthScopeArray);
isUserAdmin = oauthService.isUserAdmin(availableOauthScopeArray);
logger.atInfo().log(
"current user: %s (%s)", currentUser, isUserAdmin ? "admin" : "not admin");
"Current user: %s (%s).", currentUser, isUserAdmin ? "admin" : "not admin");
oauthClientId = oauthService.getClientId(availableOauthScopeArray);
logger.atInfo().log("client ID: %s", oauthClientId);
logger.atInfo().log("OAuth client ID: %s", oauthClientId);
authorizedScopes =
ImmutableSet.copyOf(oauthService.getAuthorizedScopes(availableOauthScopeArray));
logger.atInfo().log("authorized scope(s): %s", authorizedScopes);
logger.atInfo().log("Authorized scope(s): %s", authorizedScopes);
} catch (OAuthRequestException | OAuthServiceFailureException e) {
logger.atInfo().withCause(e).log("unable to get OAuth information");
logger.atInfo().withCause(e).log("Unable to get OAuth information.");
return AuthResult.create(NONE);
}
if ((currentUser == null) || (oauthClientId == null) || (authorizedScopes == null)) {

@ -111,13 +111,13 @@ public class OAuthAuthenticationMechanism implements AuthenticationMechanism {
// Make sure that the client ID matches, to avoid a confused deputy attack; see:
// http://stackoverflow.com/a/17439317/1179226
if (!allowedOauthClientIds.contains(oauthClientId)) {
logger.atInfo().log("client ID is not allowed");
logger.atInfo().log("OAuth client ID is not allowed.");
return AuthResult.create(NONE);
}

// Make sure that all required scopes are present.
if (!authorizedScopes.containsAll(requiredOauthScopes)) {
logger.atInfo().log("required scope(s) missing");
logger.atInfo().log("Missing required scope(s).");
return AuthResult.create(NONE);
}
@ -114,13 +114,13 @@ public class RequestAuthenticator {
break;
case APP:
if (!authResult.isAuthenticated()) {
logger.atWarning().log("Not authorized; no authentication found");
logger.atWarning().log("Not authorized; no authentication found.");
return Optional.empty();
}
break;
case USER:
if (authResult.authLevel() != AuthLevel.USER) {
logger.atWarning().log("Not authorized; no authenticated user");
logger.atWarning().log("Not authorized; no authenticated user.");
// TODO(mountford): change this so that the caller knows to return a more helpful error
return Optional.empty();
}

@ -129,7 +129,7 @@ public class RequestAuthenticator {
switch (auth.userPolicy()) {
case IGNORED:
if (authResult.authLevel() == AuthLevel.USER) {
logger.atWarning().log("Not authorized; user policy is IGNORED, but a user was found");
logger.atWarning().log("Not authorized; user policy is IGNORED, but a user was found.");
return Optional.empty();
}
break;

@ -140,7 +140,7 @@ public class RequestAuthenticator {
if (authResult.userAuthInfo().isPresent()
&& !authResult.userAuthInfo().get().isUserAdmin()) {
logger.atWarning().log(
"Not authorized; user policy is ADMIN, but the user was not an admin");
"Not authorized; user policy is ADMIN, but the user was not an admin.");
return Optional.empty();
}
break;

@ -192,7 +192,7 @@ public class RequestAuthenticator {
break;
}
}
logger.atInfo().log("No authentication found");
logger.atInfo().log("No authentication found.");
return AuthResult.NOT_AUTHENTICATED;
}
@ -132,7 +132,7 @@ public class LockHandlerImpl implements LockHandler {
for (String lockName : lockNames) {
Optional<Lock> lock = acquire(lockName, tld, leaseLength);
if (!lock.isPresent()) {
logger.atInfo().log("Couldn't acquire lock named: %s for TLD: %s", lockName, tld);
logger.atInfo().log("Couldn't acquire lock named: %s for TLD %s.", lockName, tld);
return false;
}
logger.atInfo().log("Acquired lock: %s", lock);
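For context, callers interact with this class through executeWithLocks, as the RdeStagingReducer hunk earlier shows: the callable runs only if every named lock is acquired, and a false return means the work was skipped. A hedged usage sketch; the interface shape here is inferred from the call sites above, and the lock name is illustrative:

import java.util.concurrent.Callable;
import org.joda.time.Duration;

class LockUsageExample {
  interface LockHandler {
    // Assumed shape, inferred from lockHandler.executeWithLocks(lockRunner, key.tld(), lockTimeout, lockName).
    boolean executeWithLocks(Callable<Void> callable, String tld, Duration timeout, String... lockNames);
  }

  void stage(LockHandler lockHandler, String tld, Duration lockTimeout) {
    Callable<Void> work =
        () -> {
          // Runs only while the named lock is held.
          return null;
        };
    if (!lockHandler.executeWithLocks(work, tld, lockTimeout, "RdeStaging FULL")) {
      // Another instance holds the lock, so this run is skipped rather than doubled.
      System.err.println("Lock 'RdeStaging FULL' in use.");
    }
  }
}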
@ -55,7 +55,7 @@ public final class JsonHttp {
public static Map<String, ?> read(HttpServletRequest req) throws IOException {
if (!"POST".equals(req.getMethod())
&& !"PUT".equals(req.getMethod())) {
logger.atWarning().log("JSON request payload only allowed for POST/PUT");
logger.atWarning().log("JSON request payload only allowed for POST/PUT.");
return null;
}
if (!JSON_UTF_8.is(MediaType.parse(req.getContentType()))) {

@ -66,7 +66,7 @@ public final class JsonHttp {
try {
return checkNotNull((Map<String, ?>) JSONValue.parseWithException(jsonReader));
} catch (ParseException | NullPointerException | ClassCastException e) {
logger.atWarning().withCause(e).log("Malformed JSON");
logger.atWarning().withCause(e).log("Malformed JSON.");
return null;
}
}
@ -95,7 +95,7 @@ public final class NordnVerifyAction implements Runnable {
*/
@VisibleForTesting
LordnLog verify() throws IOException {
logger.atInfo().log("LORDN verify task %s: Sending request to URL %s", actionLogId, url);
logger.atInfo().log("LORDN verify task %s: Sending request to URL %s.", actionLogId, url);
HTTPRequest req = new HTTPRequest(url, GET, validateCertificate().setDeadline(60d));
lordnRequestInitializer.initialize(req, tld);
HTTPResponse rsp;

@ -121,7 +121,7 @@ public final class NordnVerifyAction implements Runnable {
LordnLog log =
LordnLog.parse(ByteSource.wrap(rsp.getContent()).asCharSource(UTF_8).readLines());
if (log.getStatus() == LordnLog.Status.ACCEPTED) {
logger.atInfo().log("LORDN verify task %s: Upload accepted", actionLogId);
logger.atInfo().log("LORDN verify task %s: Upload accepted.", actionLogId);
} else {
logger.atSevere().log(
"LORDN verify task %s: Upload rejected with reason: %s", actionLogId, log);
@ -58,7 +58,7 @@ public final class TmchDnlAction implements Runnable {
ClaimsList claims = ClaimsListParser.parse(lines);
ClaimsListDao.save(claims);
logger.atInfo().log(
"Inserted %,d claims into the DB(s), created at %s",
"Inserted %,d claims into the DB(s), created at %s.",
claims.size(), claims.getTmdbGenerationTime());
}
}

@ -57,7 +57,7 @@ public final class TmchSmdrlAction implements Runnable {
SignedMarkRevocationList smdrl = SmdrlCsvParser.parse(lines);
smdrl.save();
logger.atInfo().log(
"Inserted %,d smd revocations into the database, created at %s",
"Inserted %,d smd revocations into the database, created at %s.",
smdrl.size(), smdrl.getCreationTime());
}
}

@ -51,13 +51,14 @@ abstract class CreateOrUpdatePremiumListCommand extends ConfirmingCommand

@Override
public String execute() throws Exception {
String message = String.format("Saved premium list %s with %d entries", name, inputData.size());
String message =
String.format("Saved premium list %s with %d entries.", name, inputData.size());
try {
logger.atInfo().log("Saving premium list for TLD %s", name);
logger.atInfo().log("Saving premium list for TLD %s.", name);
PremiumListDao.save(name, currency, inputData);
logger.atInfo().log(message);
} catch (Throwable e) {
message = "Unexpected error saving premium list from nomulus tool command";
message = "Unexpected error saving premium list from nomulus tool command.";
logger.atSevere().withCause(e).log(message);
}
return message;
@ -58,14 +58,14 @@ public abstract class CreateOrUpdateReservedListCommand extends ConfirmingComman
protected String execute() {
String message =
String.format(
"Saved reserved list %s with %d entries",
"Saved reserved list %s with %d entries.",
name, reservedList.getReservedListEntries().size());
try {
logger.atInfo().log("Saving reserved list for TLD %s", name);
logger.atInfo().log("Saving reserved list for TLD %s.", name);
ReservedListDao.save(reservedList);
logger.atInfo().log(message);
} catch (Throwable e) {
message = "Unexpected error saving reserved list from nomulus tool command";
message = "Unexpected error saving reserved list from nomulus tool command.";
logger.atSevere().withCause(e).log(message);
}
return message;

@ -281,7 +281,7 @@ final class UpdateDomainCommand extends CreateOrUpdateDomainCommand {
|| clearDsRecords);

if (!add && !remove && !change && !secDns && autorenews == null) {
logger.atInfo().log("No changes need to be made to domain %s", domain);
logger.atInfo().log("No changes need to be made to domain '%s'.", domain);
continue;
}

@ -113,7 +113,8 @@ public class BackfillRegistryLocksCommand extends ConfirmingCommand
.build());
} catch (Throwable t) {
logger.atSevere().withCause(t).log(
"Error when creating lock object for domain %s.", domainBase.getDomainName());
"Error when creating lock object for domain '%s'.",
domainBase.getDomainName());
failedDomainsBuilder.add(domainBase.getDomainName());
}
}
@ -36,7 +36,6 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.List;
import java.util.Optional;
import javax.annotation.Nullable;
import javax.inject.Inject;

/** Action that creates Google Groups for a registrar's mailing lists. */

@ -98,7 +97,7 @@ public class CreateGroupsAction implements Runnable {
responseWriter.append(types.get(i).getDisplayName()).append(" => ");
e.get().printStackTrace(responseWriter);
logger.atSevere().withCause(e.get()).log(
"Could not create Google Group for registrar %s for type %s",
"Could not create Google Group for registrar %s for type %s.",
registrar.getRegistrarName(), types.get(i));
} else {
responseWriter.printf("%s => Success%n", types.get(i).getDisplayName());

@ -109,11 +108,10 @@ public class CreateGroupsAction implements Runnable {
response.setStatus(SC_OK);
response.setPayload("Success!");
logger.atInfo().log(
"Successfully created groups for registrar: %s", registrar.getRegistrarName());
"Successfully created groups for registrar %s.", registrar.getRegistrarName());
}
}

@Nullable
private Registrar initAndLoadRegistrar() {
if (!clientId.isPresent()) {
respondToBadRequest("Error creating Google Groups, missing parameter: clientId");
@ -118,7 +118,7 @@ public class RefreshDnsForAllDomainsAction implements Runnable {
Duration.standardMinutes(random.nextInt(smearMinutes)));
} catch (Throwable t) {
logger.atSevere().withCause(t).log(
"Error while enqueuing DNS refresh for domain %s", domainName);
"Error while enqueuing DNS refresh for domain '%s'.", domainName);
response.setStatus(HttpStatus.SC_INTERNAL_SERVER_ERROR);
}
}));

@ -158,7 +158,7 @@ public class RefreshDnsForAllDomainsAction implements Runnable {
getContext().incrementCounter("active domains refreshed");
} catch (Throwable t) {
logger.atSevere().withCause(t).log(
"Error while enqueuing DNS refresh for domain %s", domainName);
"Error while enqueuing DNS refresh for domain '%s'.", domainName);
getContext().incrementCounter("active domains errored");
}
} else {

@ -98,7 +98,7 @@ public abstract class HtmlAction implements Runnable {
data.put("xsrfToken", xsrfTokenManager.generateToken(user.getEmail()));

logger.atInfo().log(
"User %s is accessing %s. Method= %s",
"User %s is accessing %s with method %s.",
authResult.userIdForLogging(), getClass().getName(), method);
runAfterLogin(data);
}
@ -92,7 +92,7 @@ public final class OteStatusAction implements Runnable, JsonActionRunner.JsonAct
SUCCESS, "OT&E check completed successfully", convertOteStats(baseClientId, oteStats));
} catch (Throwable e) {
logger.atWarning().withCause(e).log(
"Failed to verify OT&E status for registrar with input %s", input);
"Failed to verify OT&E status for registrar with input: %s", input);
return JsonResponseHelper.create(
ERROR, Optional.ofNullable(e.getMessage()).orElse("Unspecified error"));
}

@ -204,11 +204,11 @@ public class RegistrarSettingsAction implements Runnable, JsonActionRunner.JsonA
RegistrarFormFields.LAST_UPDATE_TIME.extractUntyped(args).get();
if (!latestFromArgs.equals(latest)) {
logger.atWarning().log(
"registrar changed since reading the data! "
+ " Last updated at %s, but args data last updated at %s",
"Registrar changed since reading the data!"
+ " Last updated at %s, but args data last updated at %s.",
latest, latestFromArgs);
throw new IllegalStateException(
"registrar has been changed by someone else. Please reload and retry.");
"Registrar has been changed by someone else. Please reload and retry.");
}

// Keep the current contacts so we can later check that no required contact was

@ -109,10 +109,11 @@ public final class RegistryLockGetAction implements JsonGetAction {
response.setPayload(GSON.toJson(payload));
} catch (RegistrarAccessDeniedException e) {
logger.atWarning().withCause(e).log(
"User %s doesn't have access to this registrar", authResult.userIdForLogging());
"User %s doesn't have access to this registrar.", authResult.userIdForLogging());
response.setStatus(SC_FORBIDDEN);
} catch (Exception e) {
logger.atWarning().withCause(e).log("Unexpected error when retrieving locks for a registrar");
logger.atWarning().withCause(e).log(
"Unexpected error when retrieving locks for a registrar.");
response.setStatus(SC_INTERNAL_SERVER_ERROR);
}
}
@ -147,7 +147,7 @@ public class RegistryLockPostAction implements Runnable, JsonActionRunner.JsonAc
String action = postInput.isLock ? "lock" : "unlock";
return JsonResponseHelper.create(SUCCESS, String.format("Successful %s", action));
} catch (Throwable e) {
logger.atWarning().withCause(e).log("Failed to lock/unlock domain");
logger.atWarning().withCause(e).log("Failed to lock/unlock domain.");
return JsonResponseHelper.create(
ERROR,
Optional.ofNullable(Throwables.getRootCause(e).getMessage()).orElse("Unspecified error"));

@ -77,7 +77,7 @@ public final class RegistryLockVerifyAction extends HtmlAction {
data.put("domainName", resultLock.getDomainName());
} catch (Throwable t) {
logger.atWarning().withCause(t).log(
"Error when verifying verification code %s", lockVerificationCode);
"Error when verifying verification code '%s'.", lockVerificationCode);
data.put("success", false);
data.put("errorMessage", Throwables.getRootCause(t).getMessage());
}

@ -157,7 +157,7 @@ final class DomainWhoisResponse extends WhoisResponseImpl {
ContactResource contactResource = EppResource.loadCached(contact.get());
if (contactResource == null) {
logger.atSevere().log(
"(BUG) Broken reference found from domain %s to contact %s",
"(BUG) Broken reference found from domain %s to contact %s.",
domain.getDomainName(), contact);
return this;
}

@ -111,7 +111,7 @@ public class WhoisAction implements Runnable {
responseText = results.plainTextOutput();
setWhoisMetrics(metricBuilder, 0, e.getStatus());
} catch (Throwable t) {
logger.atSevere().withCause(t).log("WHOIS request crashed");
logger.atSevere().withCause(t).log("WHOIS request crashed.");
responseText = "Internal Server Error";
setWhoisMetrics(metricBuilder, 0, SC_INTERNAL_SERVER_ERROR);
}
@ -119,7 +119,8 @@ class WhoisReader {

// Try to parse the argument as a domain name.
try {
logger.atInfo().log("Attempting domain lookup command using domain name %s", tokens.get(1));
logger.atInfo().log(
"Attempting domain lookup command using domain name '%s'.", tokens.get(1));
return commandFactory.domainLookup(
InternetDomainName.from(canonicalizeDomainName(tokens.get(1))),
fullOutput,

@ -141,7 +142,7 @@ class WhoisReader {
// Try to parse the argument as an IP address.
try {
logger.atInfo().log(
"Attempting nameserver lookup command using %s as an IP address", tokens.get(1));
"Attempting nameserver lookup command using %s as an IP address.", tokens.get(1));
return commandFactory.nameserverLookupByIp(InetAddresses.forString(tokens.get(1)));
} catch (IllegalArgumentException iae) {
// Silently ignore this exception.

@ -150,7 +151,7 @@ class WhoisReader {
// Try to parse the argument as a host name.
try {
logger.atInfo().log(
"Attempting nameserver lookup command using %s as a hostname", tokens.get(1));
"Attempting nameserver lookup command using %s as a hostname.", tokens.get(1));
return commandFactory.nameserverLookupByHost(InternetDomainName.from(
canonicalizeDomainName(tokens.get(1))));
} catch (IllegalArgumentException iae) {

@ -170,7 +171,7 @@ class WhoisReader {
}
String registrarLookupArgument = Joiner.on(' ').join(tokens.subList(1, tokens.size()));
logger.atInfo().log(
"Attempting registrar lookup command using registrar %s", registrarLookupArgument);
"Attempting registrar lookup command using registrar %s.", registrarLookupArgument);
return commandFactory.registrarLookup(registrarLookupArgument);
}

@ -178,7 +179,7 @@ class WhoisReader {
if (tokens.size() == 1) {
// Try to parse it as an IP address. If successful, then this is a lookup on a nameserver.
try {
logger.atInfo().log("Attempting nameserver lookup using %s as an IP address", arg1);
logger.atInfo().log("Attempting nameserver lookup using %s as an IP address.", arg1);
return commandFactory.nameserverLookupByIp(InetAddresses.forString(arg1));
} catch (IllegalArgumentException iae) {
// Silently ignore this exception.
@ -193,19 +194,19 @@ class WhoisReader {
Optional<InternetDomainName> tld = findTldForName(targetName);
if (!tld.isPresent()) {
// This target is not under any configured TLD, so just try it as a registrar name.
logger.atInfo().log("Attempting registrar lookup using %s as a registrar", arg1);
logger.atInfo().log("Attempting registrar lookup using %s as a registrar.", arg1);
return commandFactory.registrarLookup(arg1);
}

// If the target is exactly one level above the TLD, then this is a second level domain
// (SLD) and we should do a domain lookup on it.
if (targetName.parent().equals(tld.get())) {
logger.atInfo().log("Attempting domain lookup using %s as a domain name", targetName);
logger.atInfo().log("Attempting domain lookup using %s as a domain name.", targetName);
return commandFactory.domainLookup(targetName, fullOutput, whoisRedactedEmailText);
}

// The target is more than one level above the TLD, so we'll assume it's a nameserver.
logger.atInfo().log("Attempting nameserver lookup using %s as a hostname", targetName);
logger.atInfo().log("Attempting nameserver lookup using %s as a hostname.", targetName);
return commandFactory.nameserverLookupByHost(targetName);
} catch (IllegalArgumentException e) {
// Silently ignore this exception.

@ -218,7 +219,7 @@ class WhoisReader {
// assume this is a registrar lookup, since there's really nothing else it could be.
String registrarLookupArgument = Joiner.on(' ').join(tokens);
logger.atInfo().log(
"Attempting registrar lookup employing %s as a registrar", registrarLookupArgument);
"Attempting registrar lookup employing %s as a registrar.", registrarLookupArgument);
return commandFactory.registrarLookup(registrarLookupArgument);
}
@ -82,7 +82,7 @@ class ExtensionManagerTest {
assertThat(logMessages.build())
.contains(
"Client clientId is attempting to run HelloFlow without declaring "
+ "URIs [urn:google:params:xml:ns:metadata-1.0] on login");
+ "URIs [urn:google:params:xml:ns:metadata-1.0] on login.");
}

@Test

@ -213,7 +213,7 @@ public class ReplicateToDatastoreActionTest {
new IllegalStateException(
"Missing transaction: last txn id = -1, next available txn = 1"));
assertThat(response.getStatus()).isEqualTo(SC_INTERNAL_SERVER_ERROR);
assertThat(response.getPayload()).isEqualTo("Errored out replaying files");
assertThat(response.getPayload()).isEqualTo("Errored out replaying files.");
}

@Test

@ -287,7 +287,7 @@ public class ReplicateToDatastoreActionTest {
action.run();
assertThat(response.getStatus()).isEqualTo(SC_OK);
assertThat(response.getPayload())
.isEqualTo("Replayed 1 transaction(s) from Cloud SQL -> Datastore");
.isEqualTo("Replayed 1 transaction(s) from Cloud SQL -> Datastore.");
}

private void resetAction() {
@ -343,7 +343,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting domain lookup command using domain name example.tld");
Level.INFO, "Attempting domain lookup command using domain name 'example.tld'.");
}

@TestOfyAndSql

@ -352,7 +352,8 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting nameserver lookup command using 43.34.12.213 as an IP address");
Level.INFO,
"Attempting nameserver lookup command using 43.34.12.213 as an IP address.");
}

@TestOfyAndSql

@ -361,7 +362,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting nameserver lookup command using ns.example.tld as a hostname");
Level.INFO, "Attempting nameserver lookup command using ns.example.tld as a hostname.");
}

@TestOfyAndSql

@ -380,7 +381,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting nameserver lookup using 43.34.12.213 as an IP address");
Level.INFO, "Attempting nameserver lookup using 43.34.12.213 as an IP address.");
}

@TestOfyAndSql
@ -389,7 +390,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting registrar lookup using test as a registrar");
Level.INFO, "Attempting registrar lookup using test as a registrar.");
}

@TestOfyAndSql

@ -398,7 +399,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting domain lookup using example.tld as a domain name");
Level.INFO, "Attempting domain lookup using example.tld as a domain name.");
}

@TestOfyAndSql

@ -407,7 +408,7 @@ class WhoisReaderTest {
assertAboutLogs()
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO, "Attempting nameserver lookup using ns.example.tld as a hostname");
Level.INFO, "Attempting nameserver lookup using ns.example.tld as a hostname.");
}

@TestOfyAndSql

@ -417,6 +418,6 @@ class WhoisReaderTest {
.that(testLogHandler)
.hasLogAtLevelWithMessage(
Level.INFO,
"Attempting registrar lookup employing Example Registrar, Inc. as a registrar");
"Attempting registrar lookup employing Example Registrar, Inc. as a registrar.");
}
}
@ -97,7 +97,7 @@ public abstract class ActionHandler extends SimpleChannelInboundHandler<InboundM

// As this was an ERROR in performing the action, we must close the channel
ChannelFuture closedFuture = ctx.channel().close();
closedFuture.addListener(f -> logger.atInfo().log("Unsuccessful channel connection closed"));
closedFuture.addListener(f -> logger.atInfo().log("Unsuccessful channel connection closed."));
}
}
}

@ -145,7 +145,7 @@ public class WebWhoisActionHandler extends ActionHandler {
future.addListener(
f -> {
if (f.isSuccess()) {
logger.atInfo().log("Successfully Closed Connection.");
logger.atInfo().log("Successfully closed connection.");
} else {
logger.atWarning().log("Channel was unsuccessfully closed.");
}
@ -316,7 +316,7 @@ public class ProxyServer implements Runnable {
MetricReporter metricReporter = proxyComponent.metricReporter();
try {
metricReporter.startAsync().awaitRunning(java.time.Duration.ofSeconds(10));
logger.atInfo().log("Started up MetricReporter");
logger.atInfo().log("Started up MetricReporter.");
} catch (TimeoutException timeoutException) {
logger.atSevere().withCause(timeoutException).log(
"Failed to initialize MetricReporter: %s", timeoutException);

@ -327,7 +327,7 @@ public class ProxyServer implements Runnable {
() -> {
try {
metricReporter.stopAsync().awaitTerminated(java.time.Duration.ofSeconds(10));
logger.atInfo().log("Shut down MetricReporter");
logger.atInfo().log("Shut down MetricReporter.");
} catch (TimeoutException timeoutException) {
logger.atWarning().withCause(timeoutException).log(
"Failed to stop MetricReporter: %s", timeoutException);
@ -68,7 +68,8 @@ public class AppEngineServiceUtilsImpl implements AppEngineServiceUtils {
public String convertToSingleSubdomain(String hostname) {
Matcher matcher = APPSPOT_HOSTNAME_PATTERN.matcher(hostname);
if (!matcher.matches()) {
logger.atWarning().log("Skipping conversion because hostname can't be parsed: %s", hostname);
logger.atWarning().log(
"Skipping conversion because hostname '%s' can't be parsed.", hostname);
return hostname;
}
return matcher.group(1).replace(".", "-dot-") + ".appspot.com";
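The method above rewrites multi-level appspot hostnames into the single-subdomain "-dot-" form that App Engine serves over HTTPS. A hedged sketch of the same idea; the regex here is an assumption for illustration, since the real APPSPOT_HOSTNAME_PATTERN is not shown in this diff:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class SubdomainExample {
  // Assumed shape: capture everything before a trailing ".appspot.com".
  private static final Pattern APPSPOT = Pattern.compile("(.*)\\.appspot\\.com");

  static String convert(String hostname) {
    Matcher matcher = APPSPOT.matcher(hostname);
    if (!matcher.matches()) {
      return hostname; // Unparseable hostnames pass through unchanged.
    }
    // "backend.project.appspot.com" becomes "backend-dot-project.appspot.com".
    return matcher.group(1).replace(".", "-dot-") + ".appspot.com";
  }
}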
@ -55,7 +55,7 @@ public class RequestStatusCheckerImpl implements RequestStatusChecker {
public String getLogId() {
String requestLogId =
ApiProxy.getCurrentEnvironment().getAttributes().get(REQUEST_LOG_ID_KEY).toString();
logger.atInfo().log("Current requestLogId: %s", requestLogId);
logger.atInfo().log("Current requestLogId: %s.", requestLogId);
// We want to make sure there actually is a log to query for this request, even if the request
// dies right after this call.
//

@ -85,7 +85,7 @@ public class RequestStatusCheckerImpl implements RequestStatusChecker {
// So we have to assume it's "running" in that case.
if (requestLogs == null) {
logger.atInfo().log(
"Queried an unrecognized requestLogId %s - assume it's running", requestLogId);
"Queried an unrecognized requestLogId %s - assume it's running.", requestLogId);
return true;
}
logger.atInfo().log(
@ -173,6 +173,6 @@ public class Retrier implements Serializable {

private static void reportFailure(Throwable thrown, int failures, int maxAttempts) {
logger.atInfo().withCause(thrown).log(
"Retrying transient error, attempt %d/%d", failures, maxAttempts);
"Retrying transient error, attempt %d/%d.", failures, maxAttempts);
}
}
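Retrier's failure reporter above logs each transient failure with its attempt count before retrying. A hedged sketch of how such a reporter fits into a retry loop; the loop itself is illustrative, not Nomulus's actual implementation:

import com.google.common.flogger.FluentLogger;
import java.util.concurrent.Callable;

class RetryExample {
  private static final FluentLogger logger = FluentLogger.forEnclosingClass();

  static <T> T callWithRetry(Callable<T> callable, int maxAttempts) throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        return callable.call();
      } catch (Exception e) {
        if (attempt == maxAttempts) {
          throw e; // Out of attempts; propagate the last failure.
        }
        // Mirrors reportFailure() above: cause attached, attempt count in the message.
        logger.atInfo().withCause(e).log("Retrying transient error, attempt %d/%d.", attempt, maxAttempts);
      }
    }
  }
}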
@ -82,7 +82,7 @@ public class TaskQueueUtils implements Serializable {
() -> {
for (TaskOptions task : tasks) {
logger.atInfo().log(
"Enqueuing queue='%s' endpoint='%s'", queue.getQueueName(), task.getUrl());
"Enqueuing queue='%s' endpoint='%s'.", queue.getQueueName(), task.getUrl());
}
return queue.add(tasks);
},