Refer to Datastore everywhere correctly by its capitalized form

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=147479683
mcilwain 2017-02-14 09:11:30 -08:00 committed by Ben McIlwain
parent a8cf81bca2
commit cdadb54acd
123 changed files with 232 additions and 235 deletions


@@ -23,7 +23,7 @@ use). This makes it clear what you're getting at the callsite.
 `@VirtualEntity`-annotated entities, and `@NotBackedUp`-annotated entities. An
 `@VirtualEntity` is a "virtual entity" that just serves to construct parent keys
 for other entities (e.g. `EppResourceIndexBucket`) and is never written to
-datastore itself. An `@NotBackedUp`-annotated entity is one that specifically
+Datastore itself. An `@NotBackedUp`-annotated entity is one that specifically
 shouldn't be backed up (like the commit log entities themselves).
 We don't actually prevent you from not-backing-up a regular entity, because


@@ -41,7 +41,7 @@ public class BackupUtils {
   }

   /**
-   * Converts the given {@link ImmutableObject} to a raw datastore entity and write it to an
+   * Converts the given {@link ImmutableObject} to a raw Datastore entity and write it to an
    * {@link OutputStream} in delimited protocol buffer format.
    */
   static void serializeEntity(ImmutableObject entity, OutputStream stream) throws IOException {
@@ -51,7 +51,7 @@ public class BackupUtils {
   /**
    * Return an iterator of {@link ImmutableObject} instances deserialized from the given stream.
    *
-   * <p>This parses out delimited protocol buffers for raw datastore entities and then Ofy-loads
+   * <p>This parses out delimited protocol buffers for raw Datastore entities and then Ofy-loads
    * those as {@link ImmutableObject}.
    *
    * <p>The iterator reads from the stream on demand, and as such will fail if the stream is closed.
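For readers unfamiliar with the delimited format these methods use, here is a minimal, self-contained sketch of the same framing using protobuf's built-in delimited I/O. BytesValue stands in for the raw Datastore EntityProto and is not the class BackupUtils actually uses.

```java
import com.google.protobuf.ByteString;
import com.google.protobuf.BytesValue;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

class DelimitedFramingSketch {
  // writeDelimitedTo prefixes each message with its varint-encoded length,
  // which is what lets a reader pull records off the stream one at a time.
  static void writeRecord(byte[] rawEntityBytes, OutputStream out) throws IOException {
    BytesValue.newBuilder()
        .setValue(ByteString.copyFrom(rawEntityBytes))
        .build()
        .writeDelimitedTo(out);
  }

  // parseDelimitedFrom returns null at end-of-stream, so reading on demand
  // (as the iterator described above does) is a simple loop.
  static int countRecords(InputStream in) throws IOException {
    int count = 0;
    while (BytesValue.parseDelimitedFrom(in) != null) {
      count++;
    }
    return count;
  }
}
```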


@@ -33,7 +33,7 @@ import javax.inject.Inject;
 import org.joda.time.DateTime;

 /**
- * Action that saves commit log checkpoints to datastore and kicks off a diff export task.
+ * Action that saves commit log checkpoints to Datastore and kicks off a diff export task.
  *
  * <p>We separate computing and saving the checkpoint from exporting it because the export to GCS
  * is retryable but should not require the computation of a new checkpoint. Saving the checkpoint


@@ -40,7 +40,7 @@ import org.joda.time.DateTime;
  *
  * <p>This algorithm examines the recently written commit log data and uses a dual-read approach
  * to determine a point-in-time consistent set of checkpoint times for the commit log buckets. By
- * "consistent" we mean, generally speaking, that if the datastore were restored by replaying all
+ * "consistent" we mean, generally speaking, that if the Datastore were restored by replaying all
  * the commit logs up to the checkpoint times of the buckets, the result would be transactionally
  * correct; there must be no "holes" where restored state depends on non-restored state.
  *
@@ -116,7 +116,7 @@ class CommitLogCheckpointStrategy {
    */
   @VisibleForTesting
   ImmutableMap<Integer, DateTime> readBucketTimestamps() {
-    // Use a fresh session cache so that we get the latest data from datastore.
+    // Use a fresh session cache so that we get the latest data from Datastore.
     return ofy.doWithFreshSessionCache(new Work<ImmutableMap<Integer, DateTime>>() {
       @Override
       public ImmutableMap<Integer, DateTime> run() {


@@ -42,7 +42,7 @@ import org.joda.time.Duration;
 /**
  * Task that garbage collects old {@link CommitLogManifest} entities.
  *
- * <p>Once commit logs have been written to GCS, we don't really need them in datastore anymore,
+ * <p>Once commit logs have been written to GCS, we don't really need them in Datastore anymore,
  * except to reconstruct point-in-time snapshots of the database. But that functionality is not
  * useful after a certain amount of time, e.g. thirty days. So this task runs periodically to delete
  * the old data.
@@ -72,7 +72,7 @@ import org.joda.time.Duration;
  * commitLogMaxDeletes} for further documentation on this matter.
  *
  * <p>Finally, we need to pick an appropriate cron interval time for this task. Since a bucket
- * represents a single datastore entity group, it's only guaranteed to have one transaction per
+ * represents a single Datastore entity group, it's only guaranteed to have one transaction per
  * second. So we just need to divide {@code maxDeletes} by sixty to get an appropriate minute
  * interval. Assuming {@code maxDeletes} is five hundred, this rounds up to ten minutes, which we'll
  * double, since this task can always catch up in off-peak hours.
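The interval arithmetic in that last paragraph, spelled out as a runnable sketch; the constants mirror the figures in the Javadoc and are illustrative, not pulled from the real config:

```java
class CronIntervalSketch {
  public static void main(String[] args) {
    int maxDeletes = 500;       // deletes one run can safely perform
    int deletesPerMinute = 60;  // one transaction/second per entity group
    // 500 / 60 is roughly 8.33 minutes of work per run; round up to a
    // 10-minute cron granularity, then double for headroom, giving the
    // "every 20 minutes synchronized" schedule seen in cron.xml.
    double minutesNeeded = (double) maxDeletes / deletesPerMinute;
    System.out.printf("~%.2f minutes of deletes; schedule every %d minutes%n",
        minutesNeeded, 20);
  }
}
```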


@@ -55,7 +55,7 @@ import java.util.concurrent.Callable;
 import javax.inject.Inject;
 import org.joda.time.DateTime;

-/** Restore Registry 2 commit logs from GCS to datastore. */
+/** Restore Registry 2 commit logs from GCS to Datastore. */
 @Action(
   path = RestoreCommitLogsAction.PATH,
   method = Action.Method.POST,
@@ -124,12 +124,12 @@ public class RestoreCommitLogsAction implements Runnable {
   }

   /**
-   * Restore the contents of one transaction to datastore.
+   * Restore the contents of one transaction to Datastore.
    *
    * <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
    * object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
    * restore by deleting the deletes and recreating the saves from their proto form. We also save
-   * the commit logs themselves back to datastore, so that the commit log system itself is
+   * the commit logs themselves back to Datastore, so that the commit log system itself is
    * transparently restored alongside the data.
    *
    * @return the manifest, for use in restoring the {@link CommitLogBucket}.


@@ -1063,9 +1063,9 @@ public final class RegistryConfig {
   }

   /**
-   * Returns the length of time before commit logs should be deleted from datastore.
+   * Returns the length of time before commit logs should be deleted from Datastore.
    *
-   * <p>The only reason you'll want to retain this commit logs in datastore is for performing
+   * <p>The only reason you'll want to retain this commit logs in Datastore is for performing
    * point-in-time restoration queries for subsystems like RDE.
    *
    * @see google.registry.backup.DeleteOldCommitLogsAction


@@ -29,7 +29,7 @@ package google.registry.dns.writer;
 public interface DnsWriter extends AutoCloseable {

   /**
-   * Loads {@code domainName} from datastore and publishes its NS/DS records to the DNS server.
+   * Loads {@code domainName} from Datastore and publishes its NS/DS records to the DNS server.
    * Replaces existing records for the exact name supplied with an NS record for each name server
    * and a DS record for each delegation signer stored in the registry for the supplied domain name.
    * If the domain is deleted or is in a "non-publish" state then any existing records are deleted.
@@ -39,7 +39,7 @@ public interface DnsWriter extends AutoCloseable {
   void publishDomain(String domainName);

   /**
-   * Loads {@code hostName} from datastore and publishes its A/AAAA glue records to the DNS server,
+   * Loads {@code hostName} from Datastore and publishes its A/AAAA glue records to the DNS server,
    * if it is used as an in-bailiwick nameserver. Orphaned glue records are prohibited. Replaces
    * existing records for the exact name supplied, with an A or AAAA record (as appropriate) for
    * each address stored in the registry, for the supplied host name. If the host is deleted then
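To make the contract concrete, here is a minimal sketch of an implementation. The class name and println calls are invented; real writers stage these calls and typically commit the staged changes in close().

```java
class LoggingDnsWriter implements DnsWriter {
  @Override
  public void publishDomain(String domainName) {
    // A real writer loads the domain from Datastore here and stages NS/DS
    // record updates, or deletions if the domain is gone or non-publish.
    System.out.println("would publish NS/DS records for " + domainName);
  }

  @Override
  public void publishHost(String hostName) {
    // Likewise for A/AAAA glue records of in-bailiwick nameservers.
    System.out.println("would publish A/AAAA glue for " + hostName);
  }

  @Override
  public void close() {
    // Batching writers commit all staged changes to the DNS server here.
  }
}
```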


@@ -16,9 +16,9 @@
     We want it to be close to midnight because that reduces the chance that the
     point-in-time code won't have to go to the extra trouble of fetching old
-    versions of objects from the datastore. However, we don't want it to run too
+    versions of objects from Datastore. However, we don't want it to run too
     close to midnight, because there's always a chance that a change which was
-    timestamped before midnight hasn't fully been committed to the datastore. So
+    timestamped before midnight hasn't fully been committed to Datastore. So
     we add a 4+ minute grace period to ensure the transactions cool down, since
     our queries are not transactional.
   -->
@@ -87,7 +87,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/commitLogFanout?queue=delete-commits&endpoint=/_dr/task/deleteOldCommitLogs&jitterSeconds=600]]></url>
     <description>
-      This job deletes commit logs from datastore that are old, e.g. thirty days.
+      This job deletes commit logs from Datastore that are old, e.g. thirty days.
     </description>
     <schedule>every 20 minutes synchronized</schedule>
     <target>backend</target>
@@ -158,7 +158,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
     <description>
-      This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
+      This job fires off a Datastore backup-as-a-service job that generates snapshot files in GCS.
       It also enqueues a new task to wait on the completion of that job and then load the resulting
       snapshot into bigquery.
     </description>


@@ -113,19 +113,19 @@
     <url-pattern>/_dr/task/verifyEntityIntegrity</url-pattern>
   </servlet-mapping>

-  <!-- Exports a datastore backup snapshot to GCS. -->
+  <!-- Exports a Datastore backup snapshot to GCS. -->
   <servlet-mapping>
     <servlet-name>backend-servlet</servlet-name>
     <url-pattern>/_dr/task/exportSnapshot</url-pattern>
   </servlet-mapping>

-  <!-- Checks the completion of a datastore backup snapshot. -->
+  <!-- Checks the completion of a Datastore backup snapshot. -->
   <servlet-mapping>
     <servlet-name>backend-servlet</servlet-name>
     <url-pattern>/_dr/task/checkSnapshot</url-pattern>
   </servlet-mapping>

-  <!-- Loads a datastore backup snapshot into BigQuery. -->
+  <!-- Loads a Datastore backup snapshot into BigQuery. -->
   <servlet-mapping>
     <servlet-name>backend-servlet</servlet-name>
     <url-pattern>/_dr/task/loadSnapshot</url-pattern>
@@ -157,7 +157,7 @@
     <url-pattern>/_dr/cron/commitLogFanout</url-pattern>
   </servlet-mapping>

-  <!-- Deletes old commit logs from datastore. -->
+  <!-- Deletes old commit logs from Datastore. -->
   <servlet-mapping>
     <servlet-name>backend-servlet</servlet-name>
     <url-pattern>/_dr/task/deleteOldCommitLogs</url-pattern>


@@ -37,7 +37,7 @@
     <bucket-size>100</bucket-size>
     <retry-parameters>
       <!-- Retry aggressively since a single delayed export increases our time window of
-           unrecoverable data loss in the event of a datastore failure. -->
+           unrecoverable data loss in the event of a Datastore failure. -->
       <min-backoff-seconds>1</min-backoff-seconds>
       <max-backoff-seconds>60</max-backoff-seconds>
       <!-- No age limit; a failed export should be retried as long as possible to avoid
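As a rough illustration of what these parameters mean in practice, assuming the task queue's usual doubling backoff (the growth factor is an assumption, not spelled out in this file):

```java
class RetryBackoffSketch {
  public static void main(String[] args) {
    int minBackoffSeconds = 1;
    int maxBackoffSeconds = 60;
    // Delays grow roughly 1, 2, 4, ... seconds, then saturate at the 60s
    // ceiling; with no task-age-limit the export retries indefinitely.
    int delay = minBackoffSeconds;
    for (int retry = 1; retry <= 10; retry++) {
      System.out.printf("retry %d after ~%ds%n", retry, delay);
      delay = Math.min(delay * 2, maxBackoffSeconds);
    }
  }
}
```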


@@ -48,7 +48,7 @@
     <url-pattern>/_dr/loadtest</url-pattern>
   </servlet-mapping>

-  <!-- Command line tool uses this endpoint to modify the datastore. -->
+  <!-- The nomulus command line tool uses this endpoint to write to Datastore. -->
   <servlet>
     <display-name>Remote API Servlet</display-name>
     <servlet-name>RemoteApiServlet</servlet-name>


@@ -16,9 +16,9 @@
     We want it to be close to midnight because that reduces the chance that the
     point-in-time code won't have to go to the extra trouble of fetching old
-    versions of objects from the datastore. However, we don't want it to run too
+    versions of objects from Datastore. However, we don't want it to run too
     close to midnight, because there's always a chance that a change which was
-    timestamped before midnight hasn't fully been committed to the datastore. So
+    timestamped before midnight hasn't fully been committed to Datastore. So
     we add a 4+ minute grace period to ensure the transactions cool down, since
     our queries are not transactional.
   -->
@@ -127,7 +127,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
     <description>
-      This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
+      This job fires off a Datastore backup-as-a-service job that generates snapshot files in GCS.
       It also enqueues a new task to wait on the completion of that job and then load the resulting
       snapshot into bigquery.
     </description>


@@ -28,9 +28,9 @@
     We want it to be close to midnight because that reduces the chance that the
     point-in-time code won't have to go to the extra trouble of fetching old
-    versions of objects from the datastore. However, we don't want it to run too
+    versions of objects from Datastore. However, we don't want it to run too
     close to midnight, because there's always a chance that a change which was
-    timestamped before midnight hasn't fully been committed to the datastore. So
+    timestamped before midnight hasn't fully been committed to Datastore. So
     we add a 4+ minute grace period to ensure the transactions cool down, since
     our queries are not transactional.
   -->
@@ -108,7 +108,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/commitLogFanout?queue=delete-commits&endpoint=/_dr/task/deleteOldCommitLogs&jitterSeconds=600]]></url>
     <description>
-      This job deletes commit logs from datastore that are old, e.g. thirty days.
+      This job deletes commit logs from Datastore that are old, e.g. thirty days.
     </description>
     <schedule>every 20 minutes synchronized</schedule>
     <target>backend</target>
@@ -169,7 +169,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
     <description>
-      This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
+      This job fires off a Datastore backup-as-a-service job that generates snapshot files in GCS.
       It also enqueues a new task to wait on the completion of that job and then load the resulting
       snapshot into bigquery.
     </description>


@@ -76,7 +76,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/fanout?queue=export-snapshot&endpoint=/_dr/task/exportSnapshot&runInEmpty]]></url>
     <description>
-      This job fires off a datastore backup-as-a-service job that generates snapshot files in GCS.
+      This job fires off a Datastore backup-as-a-service job that generates snapshot files in GCS.
       It also enqueues a new task to wait on the completion of that job and then load the resulting
       snapshot into bigquery.
     </description>
@@ -146,7 +146,7 @@
   <cron>
     <url><![CDATA[/_dr/cron/commitLogFanout?queue=delete-commits&endpoint=/_dr/task/deleteOldCommitLogs&jitterSeconds=600]]></url>
     <description>
-      This job deletes commit logs from datastore that are old, e.g. thirty days.
+      This job deletes commit logs from Datastore that are old, e.g. thirty days.
     </description>
     <schedule>every 20 minutes synchronized</schedule>
     <target>backend</target>


@@ -93,7 +93,7 @@ public class CheckSnapshotAction implements Runnable {
       String message = String.format("Bad backup name %s: %s", snapshotName, e.getMessage());
       // TODO(b/19081569): Ideally this would return a 2XX error so the task would not be
       // retried but we might abandon backups that start late and haven't yet written to
-      // datastore. We could fix that by replacing this with a two-phase polling strategy.
+      // Datastore. We could fix that by replacing this with a two-phase polling strategy.
       throw new BadRequestException(message, e);
     }
   }


@@ -33,25 +33,25 @@ import java.util.List;
 import org.joda.time.DateTime;
 import org.joda.time.Duration;

-/** Container for information about a datastore backup. */
+/** Container for information about a Datastore backup. */
 public class DatastoreBackupInfo {

   @NonFinalForTesting
   private static Clock clock = new SystemClock();

-  /** The possible status values for a datastore backup. */
+  /** The possible status values for a Datastore backup. */
   public enum BackupStatus { PENDING, COMPLETE }

-  /** The name of the datastore backup. */
+  /** The name of the Datastore backup. */
   private final String backupName;

-  /** The entity kinds included in this datastore backup. */
+  /** The entity kinds included in this Datastore backup. */
   private final ImmutableSet<String> kinds;

-  /** The start time of the datastore backup. */
+  /** The start time of the Datastore backup. */
   private final DateTime startTime;

-  /** The completion time of the datastore backup, present if it has completed. */
+  /** The completion time of the Datastore backup, present if it has completed. */
   private final Optional<DateTime> completeTime;

   /**


@@ -33,10 +33,10 @@ import com.google.common.collect.Iterables;
 import google.registry.util.NonFinalForTesting;
 import java.util.NoSuchElementException;

-/** An object providing methods for starting and querying datastore backups. */
+/** An object providing methods for starting and querying Datastore backups. */
 public class DatastoreBackupService {

-  /** The internal kind name used for entities storing information about datastore backups. */
+  /** The internal kind name used for entities storing information about Datastore backups. */
   static final String BACKUP_INFO_KIND = "_AE_Backup_Information";

   /** The name of the app version used for hosting the Datastore Admin functionality. */
@@ -58,7 +58,7 @@ public class DatastoreBackupService {
   }

   /**
-   * Generates the TaskOptions needed to trigger an AppEngine datastore backup job.
+   * Generates the TaskOptions needed to trigger an AppEngine Datastore backup job.
    *
    * @see <a href="https://developers.google.com/appengine/articles/scheduled_backups">Scheduled Backups</a>
    */
@@ -79,7 +79,7 @@ public class DatastoreBackupService {
   }

   /**
-   * Launches a new datastore backup with the given name, GCS bucket, and set of kinds by
+   * Launches a new Datastore backup with the given name, GCS bucket, and set of kinds by
    * submitting a task to the given task queue, and returns a handle to that task.
    */
   public TaskHandle launchNewBackup(
@@ -87,10 +87,10 @@ public class DatastoreBackupService {
     return getQueue(queue).add(makeTaskOptions(queue, name, gcsBucket, kinds));
   }

-  /** Return an iterable of all datastore backups whose names have the given string prefix. */
+  /** Return an iterable of all Datastore backups whose names have the given string prefix. */
   public Iterable<DatastoreBackupInfo> findAllByNamePrefix(final String namePrefix) {
     // Need the raw DatastoreService to access the internal _AE_Backup_Information entities.
-    // TODO(b/19081037): make an Objectify entity class for these raw datastore entities instead.
+    // TODO(b/19081037): make an Objectify entity class for these raw Datastore entities instead.
     return FluentIterable
         .from(getDatastoreService().prepare(new Query(BACKUP_INFO_KIND)).asIterable())
         .filter(new Predicate<Entity>() {


@@ -29,10 +29,10 @@ import google.registry.model.annotations.VirtualEntity;
 /** Constants related to export code. */
 public final class ExportConstants {

-  /** Returns the names of kinds to include in datastore backups. */
+  /** Returns the names of kinds to include in Datastore backups. */
   public static ImmutableSet<String> getBackupKinds() {
     // Back up all entity classes that aren't annotated with @VirtualEntity (never even persisted
-    // to datastore, so they can't be backed up) or @NotBackedUp (intentionally omitted).
+    // to Datastore, so they can't be backed up) or @NotBackedUp (intentionally omitted).
     return FluentIterable.from(EntityClasses.ALL_CLASSES)
         .filter(not(hasAnnotation(VirtualEntity.class)))
         .filter(not(hasAnnotation(NotBackedUp.class)))
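The hasAnnotation used above comes from a static import elsewhere in the codebase; a sketch of what such a predicate plausibly looks like, in the anonymous-class Guava style of this era (an illustrative reimplementation, not the project's actual helper):

```java
import com.google.common.base.Predicate;
import java.lang.annotation.Annotation;

class AnnotationPredicateSketch {
  static Predicate<Class<?>> hasAnnotation(final Class<? extends Annotation> annotation) {
    return new Predicate<Class<?>>() {
      @Override
      public boolean apply(Class<?> clazz) {
        return clazz.isAnnotationPresent(annotation);
      }
    };
  }
}
```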


@@ -25,7 +25,7 @@ import google.registry.util.FormattingLogger;
 import javax.inject.Inject;

 /**
- * Action to trigger a datastore backup job that writes a snapshot to Google Cloud Storage.
+ * Action to trigger a Datastore backup job that writes a snapshot to Google Cloud Storage.
  *
  * <p>This is the first step of a four step workflow for exporting snapshots, with each step calling
  * the next upon successful completion:


@@ -108,7 +108,7 @@ public class LoadSnapshotAction implements Runnable {
     Bigquery bigquery = bigqueryFactory.create(projectId, SNAPSHOTS_DATASET);
     DateTime now = clock.nowUtc();
     String loadMessage =
-        String.format("Loading datastore snapshot %s from %s...", snapshotId, gcsFilename);
+        String.format("Loading Datastore snapshot %s from %s...", snapshotId, gcsFilename);
     logger.info(loadMessage);
     StringBuilder builder = new StringBuilder(loadMessage + "\n");
     builder.append("Load jobs:\n");


@@ -32,7 +32,7 @@ import google.registry.util.SqlTemplate;
 import java.io.IOException;
 import javax.inject.Inject;

-/** Update a well-known view to point at a certain datastore snapshot table in BigQuery. */
+/** Update a well-known view to point at a certain Datastore snapshot table in BigQuery. */
 @Action(path = UpdateSnapshotViewAction.PATH, method = POST)
 public class UpdateSnapshotViewAction implements Runnable {


@@ -17,7 +17,7 @@ package google.registry.flows;
 /**
  * Interface for a {@link Flow} that needs to be run transactionally.
  *
- * <p>Any flow that mutates the datastore should implement this so that {@link FlowRunner} will know
- * how to run it.
+ * <p>Any flow that mutates Datastore should implement this so that {@link FlowRunner} will know how
+ * to run it.
  */
 public interface TransactionalFlow extends Flow {}


@@ -62,7 +62,7 @@ public class MapreduceRunner {
   private final Optional<Integer> httpParamMapShards;
   private final Optional<Integer> httpParamReduceShards;

-  // Default to 3 minutes since many slices will contain datastore queries that time out at 4:30.
+  // Default to 3 minutes since many slices will contain Datastore queries that time out at 4:30.
   private Duration sliceDuration = Duration.standardMinutes(3);
   private String jobName;
   private String moduleName;


@@ -33,7 +33,7 @@ class EppResourceEntityReader<R extends EppResource> extends EppResourceBaseRead
    * The resource classes to postfilter for.
    *
    * <p>This can be {@link EppResource} or any descendant classes, regardless of whether those
-   * classes map directly to a kind in datastore, with the restriction that none of the classes
+   * classes map directly to a kind in Datastore, with the restriction that none of the classes
    * is a supertype of any of the others.
    */
   private final ImmutableSet<Class<? extends R>> resourceClasses;


@@ -26,7 +26,7 @@ import javax.xml.bind.annotation.XmlTransient;
  */
 public abstract class BackupGroupRoot extends ImmutableObject {
   /**
-   * An automatically managed timestamp of when this object was last written to datastore.
+   * An automatically managed timestamp of when this object was last written to Datastore.
    *
    * <p>Note that this is distinct from the EPP-specified {@link EppResource#lastEppUpdateTime}, in
    * that this is updated on every save, rather than only in response to an {@code <update>} command


@@ -29,7 +29,7 @@ public class CacheUtils {
    * Memoize a supplier, with a short expiration specified in the environment config.
    *
    * <p>Use this for things that might change while code is running. (For example, the various
-   * lists downloaded from the TMCH get updated in datastore and the caches need to be refreshed.)
+   * lists downloaded from the TMCH get updated in Datastore and the caches need to be refreshed.)
    */
   public static <T> Supplier<T> memoizeWithShortExpiration(Supplier<T> original) {
     Duration expiration = getSingletonCacheRefreshDuration();
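A minimal sketch of the same idea using Guava's Suppliers.memoizeWithExpiration directly; the five-minute duration is illustrative, standing in for the config value the real method reads:

```java
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import java.util.concurrent.TimeUnit;

class MemoizeSketch {
  private static int loads = 0;

  // Stands in for an expensive Datastore read, e.g. a TMCH list fetch.
  static String loadFromDatastore() {
    loads++;
    return "tmch-list-v" + loads;
  }

  static final Supplier<String> CACHED =
      Suppliers.memoizeWithExpiration(MemoizeSketch::loadFromDatastore, 5, TimeUnit.MINUTES);

  public static void main(String[] args) {
    System.out.println(CACHED.get()); // triggers one load
    System.out.println(CACHED.get()); // served from cache until expiration
  }
}
```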


@@ -18,7 +18,7 @@ import google.registry.model.translators.CreateAutoTimestampTranslatorFactory;
 import org.joda.time.DateTime;

 /**
- * A timestamp that auto-updates when first saved to datastore.
+ * A timestamp that auto-updates when first saved to Datastore.
  *
  * @see CreateAutoTimestampTranslatorFactory
  */


@@ -112,10 +112,10 @@ public final class EntityClasses {
       TmchCrl.class);

   /**
-   * Function that converts an Objectify-registered class to its datastore kind name.
+   * Function that converts an Objectify-registered class to its Datastore kind name.
    *
    * <p>Note that this mapping is not one-to-one, since polymorphic subclasses of an entity all
-   * have the same datastore kind. (In theory, two distinct top-level entities could also map to
+   * have the same Datastore kind. (In theory, two distinct top-level entities could also map to
    * the same kind since it's just {@code class.getSimpleName()}, but we test against that.)
    */
   public static final Function<Class<? extends ImmutableObject>, String> CLASS_TO_KIND_FUNCTION =
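A sketch of what such a class-to-kind function can look like using Objectify's static Key.getKind, which resolves a polymorphic subclass to its root @Entity kind; this is an illustration, not the real constant's body:

```java
import com.google.common.base.Function;
import com.googlecode.objectify.Key;

class KindFunctionSketch {
  static final Function<Class<?>, String> CLASS_TO_KIND =
      new Function<Class<?>, String>() {
        @Override
        public String apply(Class<?> clazz) {
          // For an @EntitySubclass this walks up to the root @Entity,
          // which is why polymorphic subclasses share one kind name.
          return Key.getKind(clazz);
        }
      };
}
```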


@@ -72,14 +72,14 @@ public final class EppResourceUtils {
   }

   /**
-   * Loads the last created version of an {@link EppResource} from the datastore by foreign key.
+   * Loads the last created version of an {@link EppResource} from Datastore by foreign key.
    *
    * <p>Returns null if no resource with this foreign key was ever created, or if the most recently
    * created resource was deleted before time "now".
    *
    * <p>Loading an {@link EppResource} by itself is not sufficient to know its current state since
    * it may have various expirable conditions and status values that might implicitly change its
-   * state as time progresses even if it has not been updated in the datastore. Rather, the resource
+   * state as time progresses even if it has not been updated in Datastore. Rather, the resource
    * must be combined with a timestamp to view its current state. We use a global last updated
    * timestamp on the entire entity group (which is essentially free since all writes to the entity
    * group must be serialized anyways) to guarantee monotonically increasing write times, so
@@ -131,7 +131,7 @@ public final class EppResourceUtils {
   }

   /**
-   * Checks multiple {@link EppResource} objects from the datastore by unique ids.
+   * Checks multiple {@link EppResource} objects from Datastore by unique ids.
    *
    * <p>There are currently no resources that support checks and do not use foreign keys. If we need
    * to support that case in the future, we can loosen the type to allow any {@link EppResource} and
@@ -236,7 +236,7 @@ public final class EppResourceUtils {
    * perform a single asynchronous key fetch operation.
    *
    * <p><b>Warning:</b> A resource can only be rolled backwards in time, not forwards; therefore
-   * {@code resource} should be whatever's currently in datastore.
+   * {@code resource} should be whatever's currently in Datastore.
    *
    * <p><b>Warning:</b> Revisions are granular to 24-hour periods. It's recommended that
    * {@code timestamp} be set to midnight. Otherwise you must take into consideration that under
@@ -277,7 +277,7 @@ public final class EppResourceUtils {
   }

   /**
-   * Returns an asynchronous result holding the most recent datastore revision of a given
+   * Returns an asynchronous result holding the most recent Datastore revision of a given
    * EppResource before or at the provided timestamp using the EppResource revisions map, falling
    * back to using the earliest revision or the resource as-is if there are no revisions.
    *
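A hedged usage sketch of the foreign-key load described above; the domain name and the sketch's class and variable names are invented, while the method and its nullable return are as documented in this file:

```java
import google.registry.model.EppResourceUtils;
import google.registry.model.domain.DomainResource;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

class ForeignKeyLoadSketch {
  static DomainResource loadExampleDomain() {
    DateTime now = DateTime.now(DateTimeZone.UTC);
    // Null means the name was never created, or was deleted before "now".
    return EppResourceUtils.loadByForeignKey(DomainResource.class, "example.tld", now);
  }
}
```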


@@ -129,7 +129,7 @@ public class ModelUtils {
   static Set<Class<?>> getPersistedFieldTypes(Class<?> clazz) {
     ImmutableSet.Builder<Class<?>> builder = new ImmutableSet.Builder<>();
     for (Field field : getAllFields(clazz).values()) {
-      // Skip fields that aren't persisted to datastore.
+      // Skip fields that aren't persisted to Datastore.
       if (field.isAnnotationPresent(Ignore.class)) {
         continue;
       }


@@ -21,7 +21,7 @@ import google.registry.model.translators.UpdateAutoTimestampTranslatorFactory;
 import org.joda.time.DateTime;

 /**
- * A timestamp that auto-updates on each save to datastore.
+ * A timestamp that auto-updates on each save to Datastore.
  *
  * @see UpdateAutoTimestampTranslatorFactory
  */


@@ -22,7 +22,7 @@ import java.lang.annotation.Target;

 /**
  * Annotation for an Objectify {@link Entity} to indicate that it should not be backed up by the
- * default datastore backup configuration (it may be backed up by something else).
+ * default Datastore backup configuration (it may be backed up by something else).
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target(ElementType.TYPE)


@@ -218,7 +218,7 @@ public abstract class BillingEvent extends ImmutableObject
     Integer periodYears = null;

     /**
-     * For {@link Flag#SYNTHETIC} events, when this event was persisted to datastore (i.e. the
+     * For {@link Flag#SYNTHETIC} events, when this event was persisted to Datastore (i.e. the
      * cursor position at the time the recurrence expansion job was last run). In the event a job
      * needs to be undone, a query on this field will return the complete set of potentially bad
      * events.


@@ -42,7 +42,7 @@ import org.joda.time.DateTime;
 * <p>This is a one-off single-entry bookkeeping system. There is a separate account for each
 * (registrar, currency) pair.
 *
- * <p>You should never update these entities once they've been inserted into datastore. If you need
+ * <p>You should never update these entities once they've been inserted into Datastore. If you need
 * to change something, add a correction entry.
 */
 @Entity
@@ -79,7 +79,7 @@ public class RegistrarBillingEntry extends ImmutableObject implements Jsonifiabl
    * Currency of transaction.
    *
    * <p>This field is identical to {@code amount.getCurrencyUnit()} and is only here so it can be
-   * indexed in datastore.
+   * indexed in Datastore.
    */
   @Index
   CurrencyUnit currency;


@@ -21,7 +21,7 @@ import com.googlecode.objectify.annotation.Id;
 import com.googlecode.objectify.annotation.Parent;
 import google.registry.model.ImmutableObject;

-/** A singleton entity in the datastore. */
+/** A singleton entity in Datastore. */
 public abstract class CrossTldSingleton extends ImmutableObject {
   public static final long SINGLETON_ID = 1; // There is always exactly one of these.


@@ -36,7 +36,7 @@ import org.joda.time.DateTime;
 @Entity
 public class Cursor extends ImmutableObject {

-  /** The types of cursors, used as the string id field for each cursor in datastore. */
+  /** The types of cursors, used as the string id field for each cursor in Datastore. */
   public enum CursorType {
     /** Cursor for ensuring rolling transactional isolation of BRDA staging operation. */
     BRDA(Registry.class),


@@ -29,7 +29,7 @@ import google.registry.model.annotations.NotBackedUp.Reason;

 /**
  * A helper class to convert email addresses to GAE user ids. It does so by persisting a User
- * object with the email address to datastore, and then immediately reading it back.
+ * object with the email address to Datastore, and then immediately reading it back.
  */
 @Entity
 @NotBackedUp(reason = Reason.TRANSIENT)


@@ -19,7 +19,7 @@ import com.google.common.collect.Range;
 import com.googlecode.objectify.annotation.Embed;
 import google.registry.model.ImmutableObject;

-/** An object that's equivalent to a {@code Range<Long>} that can be persisted to datastore. */
+/** An object that's equivalent to a {@code Range<Long>} that can be persisted to Datastore. */
 @Embed
 public class PersistedRangeLong extends ImmutableObject {


@@ -38,21 +38,21 @@ import javax.annotation.Nullable;
 import org.joda.time.DateTime;

 /**
- * An entity property whose value transitions over time. Each value it takes on becomes active
- * at a corresponding instant, and remains active until the next transition occurs. At least one
- * "start of time" value (corresponding to START_OF_TIME, i.e. the Unix epoch) must be provided
- * so that the property will have a value for all possible times.
+ * An entity property whose value transitions over time. Each value it takes on becomes active at a
+ * corresponding instant, and remains active until the next transition occurs. At least one "start
+ * of time" value (corresponding to START_OF_TIME, i.e. the Unix epoch) must be provided so that the
+ * property will have a value for all possible times.
  *
  * <p>This concept is naturally represented by a sorted map of {@code DateTime} to {@code V}, but
- * the AppEngine datastore cannot natively represent a map keyed on non-strings. Instead, we store
+ * the App Engine Datastore cannot natively represent a map keyed on non-strings. Instead, we store
  * an ordered list of transitions and use Objectify's @Mapify annotation to automatically recreate
- * the sorted map on load from the datastore, which is used as a backing map for this property; the
+ * the sorted map on load from Datastore, which is used as a backing map for this property; the
  * property itself also implements Map by way of extending ForwardingMap, so that this property can
  * stored directly as the @Mapify field in the entity.
  *
  * <p>The type parameter {@code T} specifies a user-defined subclass of {@code TimedTransition<V>}
- * to use for storing the list of transitions. The user is given this choice of subclass so that
- * the field of the value type stored in the transition can be given a customized name.
+ * to use for storing the list of transitions. The user is given this choice of subclass so that the
+ * field of the value type stored in the transition can be given a customized name.
  */
 public class TimedTransitionProperty<V, T extends TimedTransitionProperty.TimedTransition<V>>
     extends ForwardingMap<DateTime, T> {
@@ -62,7 +62,7 @@ public class TimedTransitionProperty<V, T extends TimedTransitionProperty.TimedT
    * for the {@code DateTime}, which means that subclasses should supply the field of type {@code V}
    * and implementations of the abstract getter and setter methods to access that field. This design
    * is so that subclasses tagged with @Embed can define a custom field name for their value, for
-   * the purpose of backwards compatibility and better readability of the datastore representation.
+   * the purpose of backwards compatibility and better readability of the Datastore representation.
    *
    * <p>The public visibility of this class exists only so that it can be subclassed; clients should
    * never call any methods on this class or attempt to access its members, but should instead treat
@@ -235,17 +235,15 @@ public class TimedTransitionProperty<V, T extends TimedTransitionProperty.TimedT
   }

   /**
-   * Returns a new mutable {@code TimedTransitionProperty} representing the given map of DateTime
-   * to value, with transitions constructed using the given {@code TimedTransition} subclass.
+   * Returns a new mutable {@code TimedTransitionProperty} representing the given map of DateTime to
+   * value, with transitions constructed using the given {@code TimedTransition} subclass.
    *
-   * <p>This method should only be used for initializing fields that are declared with the
-   * @Mapify annotation. The map for those fields must be mutable so that Objectify can load values
-   * from the datastore into the map, but clients should still never mutate the field's map
-   * directly.
+   * <p>This method should only be used for initializing fields that are declared with the @Mapify
+   * annotation. The map for those fields must be mutable so that Objectify can load values from
+   * Datastore into the map, but clients should still never mutate the field's map directly.
    */
   public static <V, T extends TimedTransition<V>> TimedTransitionProperty<V, T> forMapify(
-      ImmutableSortedMap<DateTime, V> valueMap,
-      Class<T> timedTransitionSubclass) {
+      ImmutableSortedMap<DateTime, V> valueMap, Class<T> timedTransitionSubclass) {
     return new TimedTransitionProperty<>(
         new TreeMap<>(makeTransitionMap(valueMap, timedTransitionSubclass)));
   }
@@ -254,10 +252,9 @@ public class TimedTransitionProperty<V, T extends TimedTransitionProperty.TimedT
    * Returns a new mutable {@code TimedTransitionProperty} representing the given value being set at
    * start of time, constructed using the given {@code TimedTransition} subclass.
    *
-   * <p>This method should only be used for initializing fields that are declared with the
-   * @Mapify annotation. The map for those fields must be mutable so that Objectify can load values
-   * from the datastore into the map, but clients should still never mutate the field's map
-   * directly.
+   * <p>This method should only be used for initializing fields that are declared with the @Mapify
+   * annotation. The map for those fields must be mutable so that Objectify can load values from
+   * Datastore into the map, but clients should still never mutate the field's map directly.
    */
   public static <V, T extends TimedTransition<V>> TimedTransitionProperty<V, T> forMapify(
       V valueAtStartOfTime, Class<T> timedTransitionSubclass) {
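The sorted-map semantics described above reduce to a floor lookup: the active value at any instant is the latest transition at or before it. A small self-contained sketch (the state names are invented for illustration):

```java
import java.util.TreeMap;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

class TransitionLookupSketch {
  public static void main(String[] args) {
    TreeMap<DateTime, String> transitions = new TreeMap<>();
    // The required "start of time" entry guarantees a value for all times.
    transitions.put(new DateTime(0, DateTimeZone.UTC), "SUNRISE");
    transitions.put(DateTime.parse("2017-03-01T00:00:00Z"), "GENERAL_AVAILABILITY");

    DateTime now = DateTime.now(DateTimeZone.UTC);
    System.out.println(transitions.floorEntry(now).getValue());
  }
}
```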


@@ -127,7 +127,7 @@ public class ContactCommand {
      * Unique identifier for this contact.
      *
      * <p>This is only unique in the sense that for any given lifetime specified as the time range
-     * from (creationTime, deletionTime) there can only be one contact in the datastore with this
+     * from (creationTime, deletionTime) there can only be one contact in Datastore with this
      * id. However, there can be many contacts with the same id and non-overlapping lifetimes.
      */
     @XmlElement(name = "id")


@@ -54,7 +54,7 @@ public class ContactResource extends EppResource
    * Unique identifier for this contact.
    *
    * <p>This is only unique in the sense that for any given lifetime specified as the time range
-   * from (creationTime, deletionTime) there can only be one contact in the datastore with this id.
+   * from (creationTime, deletionTime) there can only be one contact in Datastore with this id.
    * However, there can be many contacts with the same id and non-overlapping lifetimes.
    */
   String contactId;
@@ -163,7 +163,7 @@ public class ContactResource extends EppResource
    * Postal info for the contact.
    *
    * <p>The XML marshalling expects the {@link PostalInfo} objects in a list, but we can't actually
-   * persist them to datastore that way because Objectify can't handle collections of embedded
+   * persist them to Datastore that way because Objectify can't handle collections of embedded
    * objects that themselves contain collections, and there's a list of streets inside. This method
    * transforms the persisted format to the XML format for marshalling.
    */


@@ -59,7 +59,7 @@ public abstract class DomainBase extends EppResource {
    * Fully qualified domain name (puny-coded), which serves as the foreign key for this domain.
    *
    * <p>This is only unique in the sense that for any given lifetime specified as the time range
-   * from (creationTime, deletionTime) there can only be one domain in the datastore with this name.
+   * from (creationTime, deletionTime) there can only be one domain in Datastore with this name.
    * However, there can be many domains with the same name and non-overlapping lifetimes.
    *
    * @invariant fullyQualifiedDomainName == fullyQualifiedDomainName.toLowerCase()


@@ -287,7 +287,7 @@ public class DomainResource extends DomainBase
         .setAutorenewBillingEvent(transferData.getServerApproveAutorenewEvent())
         .setAutorenewPollMessage(transferData.getServerApproveAutorenewPollMessage())
         // Set the grace period using a key to the prescheduled transfer billing event. Not using
-        // GracePeriod.forBillingEvent() here in order to avoid the actual datastore fetch.
+        // GracePeriod.forBillingEvent() here in order to avoid the actual Datastore fetch.
         .setGracePeriods(ImmutableSet.of(GracePeriod.create(
             GracePeriodStatus.TRANSFER,
             transferExpirationTime.plus(Registry.get(getTld()).getTransferGracePeriodLength()),


@ -29,7 +29,7 @@ import org.joda.time.DateTime;
* A domain grace period with an expiration time. * A domain grace period with an expiration time.
* *
* <p>When a grace period expires, it is lazily removed from the {@link DomainResource} the next * <p>When a grace period expires, it is lazily removed from the {@link DomainResource} the next
* time the resource is loaded from the datastore. * time the resource is loaded from Datastore.
*/ */
@Embed @Embed
public class GracePeriod extends ImmutableObject { public class GracePeriod extends ImmutableObject {
@@ -120,7 +120,7 @@ public class GracePeriod extends ImmutableObject {
* Creates a GracePeriod for an (optional) OneTime billing event. * Creates a GracePeriod for an (optional) OneTime billing event.
* *
* <p>Normal callers should always use {@link #forBillingEvent} instead, assuming they do not * <p>Normal callers should always use {@link #forBillingEvent} instead, assuming they do not
* need to avoid loading the BillingEvent from datastore. This method should typically be * need to avoid loading the BillingEvent from Datastore. This method should typically be
* called only from test code to explicitly construct GracePeriods. * called only from test code to explicitly construct GracePeriods.
*/ */
public static GracePeriod create( public static GracePeriod create(
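A hedged example of the test-only usage described above, with the parameter order inferred from the GracePeriod.create() call site in the DomainResource hunk earlier in this diff:
// Test code constructing a GracePeriod directly, without a Datastore fetch.
// The argument meanings are assumptions based on the call site above.
GracePeriod transferGracePeriod =
    GracePeriod.create(
        GracePeriodStatus.TRANSFER,   // which kind of grace period this is
        clock.nowUtc().plusDays(5),   // when the grace period expires
        "TheRegistrar",               // the owning registrar's client ID
        null);                        // no OneTime billing event to reference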

@@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
/** /**
* Nomulus datastore model common/shared classes. * Nomulus Datastore model common/shared classes.
* *
* <p>This package is intended to hold classes which are shared across multiple XML namespaces. As * <p>This package is intended to hold classes which are shared across multiple XML namespaces. As
* such, no default namespace is declared in this package, and all objects in this package should be * such, no default namespace is declared in this package, and all objects in this package should be

@@ -61,7 +61,7 @@ public class HostResource extends EppResource implements ForeignKeyedEppResource
* Fully qualified hostname, which is a unique identifier for this host. * Fully qualified hostname, which is a unique identifier for this host.
* *
* <p>This is only unique in the sense that for any given lifetime specified as the time range * <p>This is only unique in the sense that for any given lifetime specified as the time range
* from (creationTime, deletionTime) there can only be one host in the datastore with this name. * from (creationTime, deletionTime) there can only be one host in Datastore with this name.
* However, there can be many hosts with the same name and non-overlapping lifetimes. * However, there can be many hosts with the same name and non-overlapping lifetimes.
*/ */
@Index @Index

@@ -124,7 +124,7 @@ public abstract class ForeignKeyIndex<E extends EppResource> extends BackupGroup
} }
/** /**
* Loads a {@link Key} to an {@link EppResource} from the datastore by foreign key. * Loads a {@link Key} to an {@link EppResource} from Datastore by foreign key.
* *
* <p>Returns null if no foreign key index with this foreign key was ever created, or if the * <p>Returns null if no foreign key index with this foreign key was ever created, or if the
* most recently created foreign key index was deleted before time "now". This method does not * most recently created foreign key index was deleted before time "now". This method does not
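A minimal sketch of the lookup that javadoc describes; the method name loadAndGetKey is an assumption based on the description, and imports are elided.
// Resolve a host's foreign key (its fully qualified name) to a Datastore key.
Key<HostResource> hostKey =
    ForeignKeyIndex.loadAndGetKey(HostResource.class, "ns1.example.tld", clock.nowUtc());
if (hostKey == null) {
  // Either no such host was ever created, or it was deleted before "now".
}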

@@ -47,7 +47,7 @@ import org.joda.time.DateTime;
* transaction throughput, while maintaining the ability to perform strongly-consistent ancestor * transaction throughput, while maintaining the ability to perform strongly-consistent ancestor
* queries. * queries.
* *
* @see <a href="https://cloud.google.com/appengine/articles/scaling/contention">Avoiding datastore * @see <a href="https://cloud.google.com/appengine/articles/scaling/contention">Avoiding Datastore
* contention</a> * contention</a>
*/ */
@Entity @Entity

@@ -32,14 +32,14 @@ import java.util.Objects;
import org.joda.time.DateTime; import org.joda.time.DateTime;
/** /**
* Entity representing a point-in-time consistent view of datastore, based on commit logs. * Entity representing a point-in-time consistent view of Datastore, based on commit logs.
* *
* <p>Conceptually, this entity consists of two pieces of information: the checkpoint "wall" time * <p>Conceptually, this entity consists of two pieces of information: the checkpoint "wall" time
* and a set of bucket checkpoint times. The former is the ID for this checkpoint (constrained * and a set of bucket checkpoint times. The former is the ID for this checkpoint (constrained
* to be unique upon checkpoint creation) and also represents the approximate wall time of the * to be unique upon checkpoint creation) and also represents the approximate wall time of the
* consistent datastore view this checkpoint represents. The latter is really a mapping from * consistent Datastore view this checkpoint represents. The latter is really a mapping from
* bucket ID to timestamp, where the timestamp dictates the upper bound (inclusive) on commit logs * bucket ID to timestamp, where the timestamp dictates the upper bound (inclusive) on commit logs
* from that bucket to include when restoring the datastore to this checkpoint. * from that bucket to include when restoring Datastore to this checkpoint.
*/ */
@Entity @Entity
@NotBackedUp(reason = Reason.COMMIT_LOGS) @NotBackedUp(reason = Reason.COMMIT_LOGS)

@@ -30,7 +30,7 @@ import java.util.Set;
import org.joda.time.DateTime; import org.joda.time.DateTime;
/** /**
* Archived datastore transaction that can be replayed. * Archived Datastore transaction that can be replayed.
* *
* <p>Entities of this kind are entity group sharded using a {@link CommitLogBucket} parent. Each * <p>Entities of this kind are entity group sharded using a {@link CommitLogBucket} parent. Each
* object that was saved during this transaction is stored in a {@link CommitLogMutation} child * object that was saved during this transaction is stored in a {@link CommitLogMutation} child

@@ -62,14 +62,14 @@ public class CommitLogMutation extends ImmutableObject {
* Returns a new mutation entity created from an @Entity ImmutableObject instance. * Returns a new mutation entity created from an @Entity ImmutableObject instance.
* *
* <p>The mutation key is generated deterministically from the {@code entity} key. The object is * <p>The mutation key is generated deterministically from the {@code entity} key. The object is
* converted to a raw datastore Entity, serialized to bytes, and stored within the mutation. * converted to a raw Datastore Entity, serialized to bytes, and stored within the mutation.
*/ */
public static CommitLogMutation create(Key<CommitLogManifest> parent, Object entity) { public static CommitLogMutation create(Key<CommitLogManifest> parent, Object entity) {
return createFromRaw(parent, ofy().save().toEntity(entity)); return createFromRaw(parent, ofy().save().toEntity(entity));
} }
/** /**
* Returns a new mutation entity created from a raw datastore Entity instance. * Returns a new mutation entity created from a raw Datastore Entity instance.
* *
* <p>The mutation key is generated deterministically from the {@code entity} key. The Entity * <p>The mutation key is generated deterministically from the {@code entity} key. The Entity
* itself is serialized to bytes and stored within the returned mutation. * itself is serialized to bytes and stored within the returned mutation.

@@ -102,8 +102,8 @@ public class ObjectifyService {
@Override @Override
protected AsyncDatastoreService createRawAsyncDatastoreService(DatastoreServiceConfig cfg) { protected AsyncDatastoreService createRawAsyncDatastoreService(DatastoreServiceConfig cfg) {
// In the unit test environment, wrap the datastore service in a proxy that can be used to // In the unit test environment, wrap the Datastore service in a proxy that can be used to
// examine the number of requests sent to datastore. // examine the number of requests sent to Datastore.
AsyncDatastoreService service = super.createRawAsyncDatastoreService(cfg); AsyncDatastoreService service = super.createRawAsyncDatastoreService(cfg);
return RegistryEnvironment.get().equals(RegistryEnvironment.UNITTEST) return RegistryEnvironment.get().equals(RegistryEnvironment.UNITTEST)
? new RequestCapturingAsyncDatastoreService(service) ? new RequestCapturingAsyncDatastoreService(service)
@@ -134,7 +134,7 @@ public class ObjectifyService {
} }
} }
/** Register classes that can be persisted via Objectify as datastore entities. */ /** Register classes that can be persisted via Objectify as Datastore entities. */
private static void registerEntityClasses( private static void registerEntityClasses(
Iterable<Class<? extends ImmutableObject>> entityClasses) { Iterable<Class<? extends ImmutableObject>> entityClasses) {
// Register all the @Entity classes before any @EntitySubclass classes so that we can check // Register all the @Entity classes before any @EntitySubclass classes so that we can check
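The ordering constraint mentioned in that comment could be implemented with two passes over the classes, as in this sketch; factory() is Objectify's registration entry point, and the annotation checks are assumptions about the elided body.
// First pass: register @Entity classes so their kind names are claimed.
for (Class<? extends ImmutableObject> clazz : entityClasses) {
  if (clazz.isAnnotationPresent(Entity.class)) {
    ObjectifyService.factory().register(clazz);
  }
}
// Second pass: register @EntitySubclass classes, which can now be checked
// against the kinds registered above.
for (Class<? extends ImmutableObject> clazz : entityClasses) {
  if (clazz.isAnnotationPresent(EntitySubclass.class)) {
    ObjectifyService.factory().register(clazz);
  }
}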

@@ -69,7 +69,7 @@ public class Ofy {
* *
* <p>This value should be used as a cache expiration time for any entities annotated with an * <p>This value should be used as a cache expiration time for any entities annotated with an
* Objectify {@code @Cache} annotation, to put an upper bound on unlikely-but-possible divergence * Objectify {@code @Cache} annotation, to put an upper bound on unlikely-but-possible divergence
* between memcache and datastore when a memcache write fails. * between memcache and Datastore when a memcache write fails.
*/ */
public static final int RECOMMENDED_MEMCACHE_EXPIRATION = 3600; public static final int RECOMMENDED_MEMCACHE_EXPIRATION = 3600;
@@ -230,7 +230,7 @@ public class Ofy {
| DatastoreFailureException e) { | DatastoreFailureException e) {
// TransientFailureExceptions come from task queues and always mean nothing committed. // TransientFailureExceptions come from task queues and always mean nothing committed.
// TimestampInversionExceptions are thrown by our code and are always retryable as well. // TimestampInversionExceptions are thrown by our code and are always retryable as well.
// However, datastore exceptions might get thrown even if the transaction succeeded. // However, Datastore exceptions might get thrown even if the transaction succeeded.
if ((e instanceof DatastoreTimeoutException || e instanceof DatastoreFailureException) if ((e instanceof DatastoreTimeoutException || e instanceof DatastoreFailureException)
&& checkIfAlreadySucceeded(work)) { && checkIfAlreadySucceeded(work)) {
return work.getResult(); return work.getResult();
@@ -255,10 +255,10 @@ public class Ofy {
CommitLogManifest manifest = work.getManifest(); CommitLogManifest manifest = work.getManifest();
if (manifest == null) { if (manifest == null) {
// Work ran but no commit log was created. This might mean that the transaction did not // Work ran but no commit log was created. This might mean that the transaction did not
// write anything to datastore. We can safely retry because it only reads. (Although the // write anything to Datastore. We can safely retry because it only reads. (Although the
// transaction might have written a task to a queue, we consider that safe to retry too // transaction might have written a task to a queue, we consider that safe to retry too
// since we generally assume that tasks might be doubly executed.) Alternatively it // since we generally assume that tasks might be doubly executed.) Alternatively it
// might mean that the transaction wrote to datastore but turned off commit logs by // might mean that the transaction wrote to Datastore but turned off commit logs by
// exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we // exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we
// have no hard proof that retrying is safe, we use these methods judiciously and it is // have no hard proof that retrying is safe, we use these methods judiciously and it is
// reasonable to assume that if the transaction really did succeed that the retry will // reasonable to assume that if the transaction really did succeed that the retry will
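Condensed, the retry decision in the two hunks above amounts to the following sketch; checkIfAlreadySucceeded and getResult appear in the hunks, while runInTransaction and retry stand in for the surrounding plumbing.
try {
  return runInTransaction(work);
} catch (DatastoreTimeoutException | DatastoreFailureException e) {
  // The commit may have gone through even though the exception was thrown,
  // so check before retrying to avoid applying the work twice.
  if (checkIfAlreadySucceeded(work)) {
    return work.getResult();
  }
  return retry(work);  // nothing committed, safe to run the work again
}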
@@ -300,7 +300,7 @@ public class Ofy {
/** /**
* Execute some work with a fresh session cache. * Execute some work with a fresh session cache.
* *
* <p>This is useful in cases where we want to load the latest possible data from datastore but * <p>This is useful in cases where we want to load the latest possible data from Datastore but
* don't need point-in-time consistency across loads and consequently don't need a transaction. * don't need point-in-time consistency across loads and consequently don't need a transaction.
* Note that unlike a transaction's fresh session cache, the contents of this cache will be * Note that unlike a transaction's fresh session cache, the contents of this cache will be
* discarded once the work completes, rather than being propagated into the enclosing session. * discarded once the work completes, rather than being propagated into the enclosing session.

@@ -39,9 +39,9 @@ public class RequestCapturingAsyncDatastoreService implements AsyncDatastoreServ
private final AsyncDatastoreService delegate; private final AsyncDatastoreService delegate;
// Each outer list represents datastore operations, with inner lists representing the keys or // Each outer list represents Datastore operations, with inner lists representing the keys or
// entities involved in that operation. We use static lists because we care about overall calls to // entities involved in that operation. We use static lists because we care about overall calls to
// datastore, not calls via a specific instance of the service. // Datastore, not calls via a specific instance of the service.
private static List<List<Key>> reads = synchronizedList(new ArrayList<List<Key>>()); private static List<List<Key>> reads = synchronizedList(new ArrayList<List<Key>>());
private static List<List<Key>> deletes = synchronizedList(new ArrayList<List<Key>>()); private static List<List<Key>> deletes = synchronizedList(new ArrayList<List<Key>>());
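For illustration, a capturing override in such a wrapper presumably records the operation's keys before delegating, along these lines; this particular method body is an assumption, not copied from the class.
@Override
public Future<Entity> get(Key key) {
  // Record the read, then hand the call through to the real service.
  reads.add(ImmutableList.of(key));
  return delegate.get(key);
}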

@@ -24,8 +24,8 @@ import java.util.Map;
import org.joda.time.DateTime; import org.joda.time.DateTime;
/** /**
* Exception when trying to write to the datastore with a timestamp that is inconsistent with * Exception when trying to write to Datastore with a timestamp that is inconsistent with a partial
* a partial ordering on transactions that touch the same entities. * ordering on transactions that touch the same entities.
*/ */
class TimestampInversionException extends RuntimeException { class TimestampInversionException extends RuntimeException {

@@ -42,6 +42,6 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
* <p>Command classes are never persisted, and the Objectify annotations on the Create and Update * <p>Command classes are never persisted, and the Objectify annotations on the Create and Update
* classes are purely for the benefit of the derived Resource classes that inherit from them. * classes are purely for the benefit of the derived Resource classes that inherit from them.
* Whenever a command that mutates the model is executed, a HistoryEvent is stored with the affected * Whenever a command that mutates the model is executed, a HistoryEvent is stored with the affected
* Resource as its datastore parent. All history entries have an indexed modification time field so * Resource as its Datastore parent. All history entries have an indexed modification time field so
* that the history can be read in chronological order. * that the history can be read in chronological order.
*/ */

@@ -62,10 +62,10 @@ public final class RdeRevision extends ImmutableObject {
* Sets the revision ID for a given triplet. * Sets the revision ID for a given triplet.
* *
* <p>This method verifies that the current revision is {@code revision - 1}, or that the object * <p>This method verifies that the current revision is {@code revision - 1}, or that the object
* does not exist in datastore if {@code revision == 0}. * does not exist in Datastore if {@code revision == 0}.
* *
* @throws IllegalStateException if not in a transaction * @throws IllegalStateException if not in a transaction
* @throws VerifyException if datastore state doesn't meet the above criteria * @throws VerifyException if Datastore state doesn't meet the above criteria
*/ */
public static void saveRevision(String tld, DateTime date, RdeMode mode, int revision) { public static void saveRevision(String tld, DateTime date, RdeMode mode, int revision) {
checkArgument(revision >= 0, "Negative revision: %s", revision); checkArgument(revision >= 0, "Negative revision: %s", revision);
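A sketch of the verification that javadoc describes, assuming a hypothetical loadRevision() helper that returns the currently stored revision, if any; verify() is Guava's, and throws the VerifyException mentioned above.
Optional<Integer> storedRevision = loadRevision(tld, date, mode);
if (revision == 0) {
  verify(!storedRevision.isPresent(),
      "Expected no stored revision, but found %s", storedRevision);
} else {
  verify(storedRevision.isPresent() && storedRevision.get() == revision - 1,
      "Expected stored revision %s, but found %s", revision - 1, storedRevision);
}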

@@ -197,13 +197,13 @@ public class Registrar extends ImmutableObject implements Buildable, Jsonifiable
* on its length. * on its length.
* *
* <p>NB: We are assuming that this field is unique across all registrar entities. This is not * <p>NB: We are assuming that this field is unique across all registrar entities. This is not
* formally enforced in our datastore, but should be enforced by ICANN in that no two registrars * formally enforced in Datastore, but should be enforced by ICANN in that no two registrars will
* will be accredited with the same name. * be accredited with the same name.
* *
* @see <a href="http://www.icann.org/registrar-reports/accredited-list.html">ICANN-Accredited Registrars</a> * @see <a href="http://www.icann.org/registrar-reports/accredited-list.html">ICANN-Accredited
* Registrars</a>
*/ */
@Index @Index String registrarName;
String registrarName;
/** The type of this registrar. */ /** The type of this registrar. */
Type type; Type type;

@@ -857,13 +857,13 @@ public class Registry extends ImmutableObject implements Buildable {
"Cannot create registry for TLD that is not a valid, canonical domain name"); "Cannot create registry for TLD that is not a valid, canonical domain name");
// Check the validity of all TimedTransitionProperties to ensure that they have values for // Check the validity of all TimedTransitionProperties to ensure that they have values for
// START_OF_TIME. The setters above have already checked this for new values, but also check // START_OF_TIME. The setters above have already checked this for new values, but also check
// here to catch cases where we loaded an invalid TimedTransitionProperty from datastore and // here to catch cases where we loaded an invalid TimedTransitionProperty from Datastore and
// cloned it into a new builder, to block re-building a Registry in an invalid state. // cloned it into a new builder, to block re-building a Registry in an invalid state.
instance.tldStateTransitions.checkValidity(); instance.tldStateTransitions.checkValidity();
instance.renewBillingCostTransitions.checkValidity(); instance.renewBillingCostTransitions.checkValidity();
instance.eapFeeSchedule.checkValidity(); instance.eapFeeSchedule.checkValidity();
// All costs must be in the expected currency. // All costs must be in the expected currency.
// TODO(b/21854155): When we move PremiumList into datastore, verify its currency too. // TODO(b/21854155): When we move PremiumList into Datastore, verify its currency too.
checkArgument( checkArgument(
instance.getStandardCreateCost().getCurrencyUnit().equals(instance.currency), instance.getStandardCreateCost().getCurrencyUnit().equals(instance.currency),
"Create cost must be in the registry's currency"); "Create cost must be in the registry's currency");

@@ -156,7 +156,7 @@ public class Lock extends ImmutableObject {
@Override @Override
public void vrun() { public void vrun() {
// To release a lock, check that no one else has already obtained it and if not delete it. // To release a lock, check that no one else has already obtained it and if not delete it.
// If the lock in datastore was different, then this lock is gone already; this can happen // If the lock in Datastore was different, then this lock is gone already; this can happen
// if release() is called around the expiration time and the lock expires underneath us. // if release() is called around the expiration time and the lock expires underneath us.
Lock loadedLock = ofy().load().type(Lock.class).id(lockId).now(); Lock loadedLock = ofy().load().type(Lock.class).id(lockId).now();
if (Lock.this.equals(loadedLock)) { if (Lock.this.equals(loadedLock)) {
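The elided body of that check presumably completes the compare-and-delete like this sketch; the delete call is an assumption.
if (Lock.this.equals(loadedLock)) {
  ofy().delete().entity(loadedLock);  // still our lock, so release it
}
// If the loaded lock differs, another process re-acquired the lock after it
// expired; deleting it here would release that process's lock, so do nothing.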

@@ -39,11 +39,11 @@ public class ServerSecret extends CrossTldSingleton {
long leastSignificant; long leastSignificant;
/** /**
* Get the server secret, creating it if the datastore doesn't have one already. * Get the server secret, creating it if the Datastore doesn't have one already.
* *
* <p>There's a tiny risk of a race here if two calls to this happen simultaneously and create * <p>There's a tiny risk of a race here if two calls to this happen simultaneously and create
* different keys, in which case one of the calls will end up with an incorrect key. However, this * different keys, in which case one of the calls will end up with an incorrect key. However, this
* happens precisely once in the history of the system (after that it's always in datastore) so * happens precisely once in the history of the system (after that it's always in Datastore) so
* it's not worth worrying about. * it's not worth worrying about.
*/ */
public static UUID getServerSecret() { public static UUID getServerSecret() {
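Given that javadoc, the body is presumably a get-or-create along these lines; loadSingleton() and the field wiring are assumptions for illustration, though the leastSignificant field appears in the hunk above.
ServerSecret secret = loadSingleton();  // null only on the very first call
if (secret == null) {
  UUID uuid = UUID.randomUUID();
  secret = create(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
  // Unsynchronized on purpose: a concurrent first call may save a different
  // secret, and whichever write lands last wins, as the javadoc notes.
  ofy().save().entity(secret).now();
}
return new UUID(secret.mostSignificant, secret.leastSignificant);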

@@ -89,7 +89,7 @@ public class SignedMarkRevocationList extends ImmutableObject {
boolean isShard; boolean isShard;
/** /**
* A cached supplier that fetches the SMDRL shards from the datastore and recombines them into a * A cached supplier that fetches the SMDRL shards from Datastore and recombines them into a
* single {@link SignedMarkRevocationList} object. * single {@link SignedMarkRevocationList} object.
*/ */
private static final Supplier<SignedMarkRevocationList> CACHE = private static final Supplier<SignedMarkRevocationList> CACHE =
@@ -119,7 +119,7 @@ public class SignedMarkRevocationList extends ImmutableObject {
}}); }});
}}); }});
/** Return a single logical instance that combines all the datastore shards. */ /** Return a single logical instance that combines all Datastore shards. */
public static SignedMarkRevocationList get() { public static SignedMarkRevocationList get() {
return CACHE.get(); return CACHE.get();
} }
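The CACHE declared above is presumably a time-limited memoizing supplier; one plausible shape, using Guava and a hypothetical loadAndCombineShards() helper:
private static final Supplier<SignedMarkRevocationList> CACHE =
    Suppliers.memoizeWithExpiration(
        new Supplier<SignedMarkRevocationList>() {
          @Override
          public SignedMarkRevocationList get() {
            // Fetch all shards and merge them into one logical list.
            return loadAndCombineShards();
          }
        },
        10, TimeUnit.MINUTES);  // illustrative expiry, not the real value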
@@ -152,7 +152,7 @@ public class SignedMarkRevocationList extends ImmutableObject {
return revokes.size(); return revokes.size();
} }
/** Save this list to the datastore in sharded form. Returns {@code this}. */ /** Save this list to Datastore in sharded form. Returns {@code this}. */
public SignedMarkRevocationList save() { public SignedMarkRevocationList save() {
ofy().transact(new VoidWork() { ofy().transact(new VoidWork() {
@Override @Override

@@ -93,7 +93,7 @@ public class ClaimsListShard extends ImmutableObject {
boolean isShard = false; boolean isShard = false;
/** /**
* A cached supplier that fetches the claims list shards from the datastore and recombines them * A cached supplier that fetches the claims list shards from Datastore and recombines them
* into a single {@link ClaimsListShard} object. * into a single {@link ClaimsListShard} object.
*/ */
private static final Supplier<ClaimsListShard> CACHE = private static final Supplier<ClaimsListShard> CACHE =
@@ -203,7 +203,7 @@ public class ClaimsListShard extends ImmutableObject {
return instance; return instance;
} }
/** Return a single logical instance that combines all the datastore shards. */ /** Return a single logical instance that combines all Datastore shards. */
@Nullable @Nullable
public static ClaimsListShard get() { public static ClaimsListShard get() {
return CACHE.get(); return CACHE.get();

@@ -46,10 +46,10 @@ public final class TmchCrl extends CrossTldSingleton {
} }
/** /**
* Change the datastore singleton to a new ASCII-armored X.509 CRL. * Change the Datastore singleton to a new ASCII-armored X.509 CRL.
* *
* <p>Please do not call this function unless your CRL is properly formatted, signed by the root, * <p>Please do not call this function unless your CRL is properly formatted, signed by the root,
* and actually newer than the one currently in the datastore. * and actually newer than the one currently in Datastore.
*/ */
public static void set(final String crl, final String url) { public static void set(final String crl, final String url) {
ofy().transactNew(new VoidWork() { ofy().transactNew(new VoidWork() {

@@ -36,7 +36,7 @@ public class CreateAutoTimestampTranslatorFactory
/** /**
* Load an existing timestamp. It can be assumed to be non-null since if the field is null in * Load an existing timestamp. It can be assumed to be non-null since if the field is null in
* datastore then Objectify will skip this translator and directly load a null. * Datastore then Objectify will skip this translator and directly load a null.
*/ */
@Override @Override
public CreateAutoTimestamp loadValue(Date datastoreValue) { public CreateAutoTimestamp loadValue(Date datastoreValue) {

@@ -143,7 +143,7 @@ abstract class ImmutableSortedMapTranslatorFactory<K extends Comparable<? super
ImmutableSortedMap<K, V> mapToSave = transformBeforeSave( ImmutableSortedMap<K, V> mapToSave = transformBeforeSave(
ImmutableSortedMap.copyOfSorted(nullToEmpty(mapFromPojo))); ImmutableSortedMap.copyOfSorted(nullToEmpty(mapFromPojo)));
if (mapToSave.isEmpty()) { if (mapToSave.isEmpty()) {
throw new SkipException(); // the datastore doesn't store empty lists throw new SkipException(); // Datastore doesn't store empty lists
} }
Node node = new Node(path); Node node = new Node(path);
for (Map.Entry<K, V> entry : mapToSave.entrySet()) { for (Map.Entry<K, V> entry : mapToSave.entrySet()) {

@@ -35,7 +35,7 @@ public class UpdateAutoTimestampTranslatorFactory
/** /**
* Load an existing timestamp. It can be assumed to be non-null since if the field is null in * Load an existing timestamp. It can be assumed to be non-null since if the field is null in
* datastore then Objectify will skip this translator and directly load a null. * Datastore then Objectify will skip this translator and directly load a null.
*/ */
@Override @Override
public UpdateAutoTimestamp loadValue(Date datastoreValue) { public UpdateAutoTimestamp loadValue(Date datastoreValue) {

@@ -17,7 +17,7 @@ package google.registry.rde;
import com.google.auto.value.AutoValue; import com.google.auto.value.AutoValue;
import java.io.Serializable; import java.io.Serializable;
/** Container of datastore resource marshalled by {@link RdeMarshaller}. */ /** Container of Datastore resource marshalled by {@link RdeMarshaller}. */
@AutoValue @AutoValue
public abstract class DepositFragment implements Serializable { public abstract class DepositFragment implements Serializable {

@@ -41,7 +41,7 @@ import org.joda.time.Duration;
* <p>{@link Lock} is used to ensure only one task executes at a time for a given * <p>{@link Lock} is used to ensure only one task executes at a time for a given
* {@code LockedCursorTask} subclass + TLD combination. This is necessary because App Engine tasks * {@code LockedCursorTask} subclass + TLD combination. This is necessary because App Engine tasks
* might double-execute. Normally tasks solve this by being idempotent, but that's not possible for * might double-execute. Normally tasks solve this by being idempotent, but that's not possible for
* RDE, which writes to a GCS filename with a deterministic name. So the datastore is used to * RDE, which writes to a GCS filename with a deterministic name. So Datastore is used to
* guarantee isolation. If we can't acquire the lock, it means the task is already running, so * guarantee isolation. If we can't acquire the lock, it means the task is already running, so
* {@link NoContentException} is thrown to cancel the task. * {@link NoContentException} is thrown to cancel the task.
* *
@ -62,7 +62,7 @@ class EscrowTaskRunner {
/** /**
* Performs task logic while the lock is held. * Performs task logic while the lock is held.
* *
* @param watermark the logical time for a point-in-time view of datastore * @param watermark the logical time for a point-in-time view of Datastore
*/ */
abstract void runWithLock(DateTime watermark) throws Exception; abstract void runWithLock(DateTime watermark) throws Exception;
} }

@@ -48,7 +48,7 @@ import org.joda.time.Duration;
* *
* <p>If no deposits have been made so far, then {@code startingPoint} is used as the watermark * <p>If no deposits have been made so far, then {@code startingPoint} is used as the watermark
* of the next deposit. If that's a day in the future, then escrow won't start until that date. * of the next deposit. If that's a day in the future, then escrow won't start until that date.
* This first deposit time will be saved to datastore in a transaction. * This first deposit time will be saved to Datastore in a transaction.
*/ */
public final class PendingDepositChecker { public final class PendingDepositChecker {

@@ -49,7 +49,7 @@ import org.joda.time.Duration;
* If there's nothing to deposit, we return 204 No Content; otherwise, we fire off a MapReduce job * If there's nothing to deposit, we return 204 No Content; otherwise, we fire off a MapReduce job
* and redirect to its status GUI. * and redirect to its status GUI.
* *
* <p>The mapreduce job scans every {@link EppResource} in datastore. It maps a point-in-time * <p>The mapreduce job scans every {@link EppResource} in Datastore. It maps a point-in-time
* representation of each entity to the escrow XML files in which it should appear. * representation of each entity to the escrow XML files in which it should appear.
* *
* <p>There is one map worker for each {@code EppResourceIndexBucket} entity group shard. There is * <p>There is one map worker for each {@code EppResourceIndexBucket} entity group shard. There is
@@ -80,7 +80,7 @@ import org.joda.time.Duration;
* *
* <p>Valid model objects might not be valid to the RDE XML schema. A single invalid object will * <p>Valid model objects might not be valid to the RDE XML schema. A single invalid object will
* cause the whole deposit to fail. You need to check the logs, find out which entities are broken, * cause the whole deposit to fail. You need to check the logs, find out which entities are broken,
* and perform datastore surgery. * and perform Datastore surgery.
* *
* <p>If a deposit fails, an error is emitted to the logs for each broken entity. It tells you the * <p>If a deposit fails, an error is emitted to the logs for each broken entity. It tells you the
* key and shows you its representation in lenient XML. * key and shows you its representation in lenient XML.

@@ -68,7 +68,7 @@ public final class RdeStagingMapper extends Mapper<EppResource, PendingDeposit,
return; return;
} }
// Skip polymorphic entities that share datastore kind. // Skip polymorphic entities that share Datastore kind.
if (!(resource instanceof ContactResource if (!(resource instanceof ContactResource
|| resource instanceof DomainResource || resource instanceof DomainResource
|| resource instanceof HostResource)) { || resource instanceof HostResource)) {

@@ -47,10 +47,10 @@ import org.joda.time.DateTime;
* A mapreduce that links hosts from an escrow file to their superordinate domains. * A mapreduce that links hosts from an escrow file to their superordinate domains.
* *
* <p>This mapreduce is run as the last step of the process of importing escrow files. For each host * <p>This mapreduce is run as the last step of the process of importing escrow files. For each host
* in the escrow file, the corresponding {@link HostResource} record in the datastore is linked to * in the escrow file, the corresponding {@link HostResource} record in Datastore is linked to its
* its superordinate {@link DomainResource} only if it is an in-zone host. This is necessary because * superordinate {@link DomainResource} only if it is an in-zone host. This is necessary because all
* all hosts must exist before domains can be imported, due to references in host objects, and * hosts must exist before domains can be imported, due to references in host objects, and domains
* domains must exist before hosts can be linked to their superordinate domains. * must exist before hosts can be linked to their superordinate domains.
* *
* <p>Specify the escrow file to import with the "path" parameter. * <p>Specify the escrow file to import with the "path" parameter.
*/ */

@@ -36,7 +36,7 @@ public class ClaimsListParser {
/** /**
* Converts the lines from the DNL CSV file into a {@link ClaimsListShard} object. * Converts the lines from the DNL CSV file into a {@link ClaimsListShard} object.
* *
* <p>Please note that this does <b>not</b> insert the object into the datastore. * <p>Please note that this does <b>not</b> insert the object into Datastore.
*/ */
public static ClaimsListShard parse(List<String> lines) { public static ClaimsListShard parse(List<String> lines) {
ImmutableMap.Builder<String, String> builder = new ImmutableMap.Builder<>(); ImmutableMap.Builder<String, String> builder = new ImmutableMap.Builder<>();
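The typical call sequence, matching what TmchDnlAction does further down in this diff: parse() only builds the object, and save() is the separate step that writes it.
ClaimsListShard claims = ClaimsListParser.parse(lines);
claims.save();  // the explicit Datastore write that parse() does not do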

@@ -34,7 +34,7 @@ public final class TmchCrlAction implements Runnable {
@Inject TmchCertificateAuthority tmchCertificateAuthority; @Inject TmchCertificateAuthority tmchCertificateAuthority;
@Inject TmchCrlAction() {} @Inject TmchCrlAction() {}
/** Synchronously fetches latest ICANN TMCH CRL and saves it to datastore. */ /** Synchronously fetches latest ICANN TMCH CRL and saves it to Datastore. */
@Override @Override
public void run() { public void run() {
try { try {

@@ -24,7 +24,7 @@ import org.bouncycastle.openpgp.PGPPublicKey;
import org.bouncycastle.openpgp.PGPUtil; import org.bouncycastle.openpgp.PGPUtil;
import org.bouncycastle.openpgp.bc.BcPGPPublicKeyRing; import org.bouncycastle.openpgp.bc.BcPGPPublicKeyRing;
/** Helper class for common data loaded from the jar and datastore at runtime. */ /** Helper class for common data loaded from the jar and Datastore at runtime. */
public final class TmchData { public final class TmchData {
private static final String BEGIN_ENCODED_SMD = "-----BEGIN ENCODED SMD-----"; private static final String BEGIN_ENCODED_SMD = "-----BEGIN ENCODED SMD-----";

@@ -39,7 +39,7 @@ public final class TmchDnlAction implements Runnable {
@Inject @Key("marksdbDnlLogin") Optional<String> marksdbDnlLogin; @Inject @Key("marksdbDnlLogin") Optional<String> marksdbDnlLogin;
@Inject TmchDnlAction() {} @Inject TmchDnlAction() {}
/** Synchronously fetches latest domain name list and saves it to datastore. */ /** Synchronously fetches latest domain name list and saves it to Datastore. */
@Override @Override
public void run() { public void run() {
List<String> lines; List<String> lines;
@@ -50,7 +50,7 @@ public final class TmchDnlAction implements Runnable {
} }
ClaimsListShard claims = ClaimsListParser.parse(lines); ClaimsListShard claims = ClaimsListParser.parse(lines);
claims.save(); claims.save();
logger.infofmt("Inserted %,d claims into datastore, created at %s", logger.infofmt("Inserted %,d claims into Datastore, created at %s",
claims.size(), claims.getCreationTime()); claims.size(), claims.getCreationTime());
} }
} }

@@ -39,7 +39,7 @@ public final class TmchSmdrlAction implements Runnable {
@Inject @Key("marksdbSmdrlLogin") Optional<String> marksdbSmdrlLogin; @Inject @Key("marksdbSmdrlLogin") Optional<String> marksdbSmdrlLogin;
@Inject TmchSmdrlAction() {} @Inject TmchSmdrlAction() {}
/** Synchronously fetches latest signed mark revocation list and saves it to datastore. */ /** Synchronously fetches latest signed mark revocation list and saves it to Datastore. */
@Override @Override
public void run() { public void run() {
List<String> lines; List<String> lines;
@@ -50,7 +50,7 @@ public final class TmchSmdrlAction implements Runnable {
} }
SignedMarkRevocationList smdrl = SmdrlCsvParser.parse(lines); SignedMarkRevocationList smdrl = SmdrlCsvParser.parse(lines);
smdrl.save(); smdrl.save();
logger.infofmt("Inserted %,d smd revocations into datastore, created at %s", logger.infofmt("Inserted %,d smd revocations into Datastore, created at %s",
smdrl.size(), smdrl.getCreationTime()); smdrl.size(), smdrl.getCreationTime());
} }
} }

@@ -22,9 +22,9 @@ import google.registry.export.DatastoreBackupService;
import google.registry.tools.Command.RemoteApiCommand; import google.registry.tools.Command.RemoteApiCommand;
/** /**
* Command to check the status of a datastore backup, or "snapshot". * Command to check the status of a Datastore backup, or "snapshot".
*/ */
@Parameters(separators = " =", commandDescription = "Check the status of a datastore snapshot") @Parameters(separators = " =", commandDescription = "Check the status of a Datastore snapshot")
public class CheckSnapshotCommand implements RemoteApiCommand { public class CheckSnapshotCommand implements RemoteApiCommand {
@Parameter( @Parameter(

@@ -67,7 +67,7 @@ import org.joda.time.DateTime;
* </pre> * </pre>
* *
* <p>We only care about three fields: 1) the "Affiliate" field which corresponds to the registrar * <p>We only care about three fields: 1) the "Affiliate" field which corresponds to the registrar
* clientId stored in datastore, and which we use to determine which registrar gets the credit, 2) * clientId stored in Datastore, and which we use to determine which registrar gets the credit, 2)
* the "Commissions" field which contains the amount of the auction credit (as determined by logic * the "Commissions" field which contains the amount of the auction credit (as determined by logic
* on the auction provider's side, see the Finance Requirements Doc for more information), and 3) * on the auction provider's side, see the Finance Requirements Doc for more information), and 3)
* the "CurrencyCode" field, which we validate matches the TLD-wide currency for this TLD. * the "CurrencyCode" field, which we validate matches the TLD-wide currency for this TLD.

@@ -31,7 +31,7 @@ import java.util.List;
import org.joda.time.DateTime; import org.joda.time.DateTime;
/** Command to create a {@link ReservedList} on Datastore. */ /** Command to create a {@link ReservedList} on Datastore. */
@Parameters(separators = " =", commandDescription = "Create a ReservedList in datastore.") @Parameters(separators = " =", commandDescription = "Create a ReservedList in Datastore.")
final class CreateReservedListCommand extends CreateOrUpdateReservedListCommand { final class CreateReservedListCommand extends CreateOrUpdateReservedListCommand {
@VisibleForTesting @VisibleForTesting

@@ -74,7 +74,7 @@ final class GenerateEscrowDepositCommand implements RemoteApiCommand {
@Parameter( @Parameter(
names = {"-w", "--watermark"}, names = {"-w", "--watermark"},
description = "Point-in-time timestamp for snapshotting the datastore.", description = "Point-in-time timestamp for snapshotting Datastore.",
validateWith = DateTimeParameter.class) validateWith = DateTimeParameter.class)
private DateTime watermark = DateTime.now(UTC); private DateTime watermark = DateTime.now(UTC);

@@ -31,7 +31,7 @@ import java.nio.file.Paths;
* A command to download the current claims list. * A command to download the current claims list.
* *
* <p>This is not the original file we fetched from TMCH. It is just a representation of what we * <p>This is not the original file we fetched from TMCH. It is just a representation of what we
* are currently storing in datastore. * are currently storing in Datastore.
*/ */
@Parameters(separators = " =", commandDescription = "Download the current claims list") @Parameters(separators = " =", commandDescription = "Download the current claims list")
final class GetClaimsListCommand implements RemoteApiCommand { final class GetClaimsListCommand implements RemoteApiCommand {

@@ -25,7 +25,7 @@ import google.registry.tools.Command.RemoteApiCommand;
import java.util.List; import java.util.List;
/** /**
* Command to get info on a datastore resource by websafe key. * Command to get info on a Datastore resource by websafe key.
*/ */
@Parameters(separators = " =") @Parameters(separators = " =")
final class GetResourceByKeyCommand implements RemoteApiCommand { final class GetResourceByKeyCommand implements RemoteApiCommand {

@@ -32,8 +32,8 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
/** Command to load datastore snapshots into Bigquery. */ /** Command to load Datastore snapshots into Bigquery. */
@Parameters(separators = " =", commandDescription = "Load datastore snapshot into Bigquery") @Parameters(separators = " =", commandDescription = "Load Datastore snapshot into Bigquery")
final class LoadSnapshotCommand extends BigqueryCommand { final class LoadSnapshotCommand extends BigqueryCommand {
@Parameter( @Parameter(
@@ -43,12 +43,12 @@ final class LoadSnapshotCommand extends BigqueryCommand {
@Parameter( @Parameter(
names = "--gcs_bucket", names = "--gcs_bucket",
description = "Name of the GCS bucket from which to import datastore snapshots.") description = "Name of the GCS bucket from which to import Datastore snapshots.")
private String snapshotGcsBucket = "domain-registry/snapshots/testing"; private String snapshotGcsBucket = "domain-registry/snapshots/testing";
@Parameter( @Parameter(
names = "--kinds", names = "--kinds",
description = "List of datastore kinds for which to import snapshot data.") description = "List of Datastore kinds for which to import snapshot data.")
private List<String> kindNames = new ArrayList<>(ExportConstants.getReportingKinds()); private List<String> kindNames = new ArrayList<>(ExportConstants.getReportingKinds());
/** Runs the main snapshot import logic. */ /** Runs the main snapshot import logic. */

@@ -44,7 +44,7 @@ import java.util.Objects;
import java.util.Set; import java.util.Set;
import javax.annotation.Nullable; import javax.annotation.Nullable;
/** A {@link ConfirmingCommand} that changes objects in the datastore. */ /** A {@link ConfirmingCommand} that changes objects in Datastore. */
public abstract class MutatingCommand extends ConfirmingCommand implements RemoteApiCommand { public abstract class MutatingCommand extends ConfirmingCommand implements RemoteApiCommand {
/** /**

@@ -33,7 +33,7 @@ import java.util.HashMap;
import java.util.Map; import java.util.Map;
import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jce.provider.BouncyCastleProvider;
/** Container class to create and run remote commands against a datastore instance. */ /** Container class to create and run remote commands against a Datastore instance. */
@Parameters(separators = " =", commandDescription = "Command-line interface to the registry") @Parameters(separators = " =", commandDescription = "Command-line interface to the registry")
final class RegistryCli { final class RegistryCli {

@@ -19,7 +19,7 @@ import google.registry.tools.javascrap.FixDomainNameserverKeysCommand;
import google.registry.tools.javascrap.RemoveDomainTransferDataCommand; import google.registry.tools.javascrap.RemoveDomainTransferDataCommand;
import google.registry.tools.javascrap.RemoveIpAddressCommand; import google.registry.tools.javascrap.RemoveIpAddressCommand;
/** Container class to create and run remote commands against a datastore instance. */ /** Container class to create and run remote commands against a Datastore instance. */
public final class RegistryTool { public final class RegistryTool {
/** /**

@@ -26,7 +26,7 @@ import java.util.List;
/** /**
* A command to load and resave an entity by websafe key. * A command to load and resave an entity by websafe key.
* *
* <p>This triggers @OnSave changes. If the entity was directly edited in the datastore viewer, this * <p>This triggers @OnSave changes. If the entity was directly edited in the Datastore viewer, this
* can be used to make sure that the commit logs reflect the new state. * can be used to make sure that the commit logs reflect the new state.
*/ */
@Parameters( @Parameters(

@@ -28,7 +28,7 @@ import org.joda.time.DateTime;
/** /**
* A command to load and resave an {@link EppResource} by foreign key. * A command to load and resave an {@link EppResource} by foreign key.
* *
* <p>This triggers @OnSave changes. If the entity was directly edited in the datastore viewer, this * <p>This triggers @OnSave changes. If the entity was directly edited in the Datastore viewer, this
* can be used to make sure that the commit logs reflect the new state. * can be used to make sure that the commit logs reflect the new state.
*/ */
@Parameters( @Parameters(

@@ -153,7 +153,7 @@ final class UpdateApplicationStatusCommand extends MutatingCommand {
applicationBuilder.addStatusValue(StatusValue.PENDING_CREATE); applicationBuilder.addStatusValue(StatusValue.PENDING_CREATE);
} }
// Stage changes for all entities that need to be saved to datastore. // Stage changes for all entities that need to be saved to Datastore.
stageEntityChange(domainApplication, applicationBuilder.build()); stageEntityChange(domainApplication, applicationBuilder.build());
stageEntityChange(null, pollMessageBuilder.build()); stageEntityChange(null, pollMessageBuilder.build());
stageEntityChange(null, newHistoryEntry); stageEntityChange(null, newHistoryEntry);

@@ -111,7 +111,7 @@ final class UpdateClaimsNoticeCommand implements RemoteApiCommand {
.setBySuperuser(true) .setBySuperuser(true)
.build(); .build();
// Save entities to datastore. // Save entities to Datastore.
ofy().save().<Object>entities(updatedApplication, newHistoryEntry); ofy().save().<Object>entities(updatedApplication, newHistoryEntry);
} }
} }

@@ -27,7 +27,7 @@ import google.registry.util.SystemClock;
import java.nio.file.Files; import java.nio.file.Files;
/** Command to safely update {@link ReservedList} on Datastore. */ /** Command to safely update {@link ReservedList} on Datastore. */
@Parameters(separators = " =", commandDescription = "Update a ReservedList in datastore.") @Parameters(separators = " =", commandDescription = "Update a ReservedList in Datastore.")
final class UpdateReservedListCommand extends CreateOrUpdateReservedListCommand { final class UpdateReservedListCommand extends CreateOrUpdateReservedListCommand {
@Override @Override

@@ -113,7 +113,7 @@ final class UpdateSmdCommand implements RemoteApiCommand {
.setReason("UpdateSmdCommand" + (reason != null ? ": " + reason : "")) .setReason("UpdateSmdCommand" + (reason != null ? ": " + reason : ""))
.build(); .build();
// Save entities to datastore. // Save entities to Datastore.
ofy().save().<Object>entities(updatedApplication, newHistoryEntry); ofy().save().<Object>entities(updatedApplication, newHistoryEntry);
} }
} }

@@ -78,7 +78,7 @@ public class DeleteEntityAction implements Runnable {
rawDeletionsBuilder.add(rawEntity.get().getKey()); rawDeletionsBuilder.add(rawEntity.get().getKey());
continue; continue;
} }
// The entity could not be found by either Objectify or the datastore service // The entity could not be found by either Objectify or the Datastore service
throw new BadRequestException("Could not find entity with key " + rawKeyString); throw new BadRequestException("Could not find entity with key " + rawKeyString);
} }
// Delete raw entities. // Delete raw entities.
@@ -108,7 +108,7 @@ public class DeleteEntityAction implements Runnable {
try { try {
return Optional.fromNullable(getDatastoreService().get(rawKey)); return Optional.fromNullable(getDatastoreService().get(rawKey));
} catch (EntityNotFoundException e) { } catch (EntityNotFoundException e) {
logger.warningfmt(e, "Could not load entity from datastore service with key %s", logger.warningfmt(e, "Could not load entity from Datastore service with key %s",
rawKey.toString()); rawKey.toString());
return Optional.absent(); return Optional.absent();
} }

@@ -35,7 +35,7 @@ import google.registry.request.Response;
import java.util.Arrays; import java.util.Arrays;
import javax.inject.Inject; import javax.inject.Inject;
/** Deletes all commit logs in datastore. */ /** Deletes all commit logs in Datastore. */
@Action(path = "/_dr/task/killAllCommitLogs", method = POST) @Action(path = "/_dr/task/killAllCommitLogs", method = POST)
public class KillAllCommitLogsAction implements Runnable { public class KillAllCommitLogsAction implements Runnable {

@@ -35,7 +35,7 @@ import google.registry.request.Action;
import google.registry.request.Response; import google.registry.request.Response;
import javax.inject.Inject; import javax.inject.Inject;
/** Deletes all {@link EppResource} objects in datastore, including indices and descendants. */ /** Deletes all {@link EppResource} objects in Datastore, including indices and descendants. */
@Action(path = "/_dr/task/killAllEppResources", method = POST) @Action(path = "/_dr/task/killAllEppResources", method = POST)
public class KillAllEppResourcesAction implements Runnable { public class KillAllEppResourcesAction implements Runnable {

@@ -60,7 +60,7 @@ public class SessionUtils {
* <li>If it does not exist, then we will attempt to guess the {@link Registrar} with which the * <li>If it does not exist, then we will attempt to guess the {@link Registrar} with which the
* user's GAIA ID is associated. The {@code clientId} of the first matching {@code Registrar} will * user's GAIA ID is associated. The {@code clientId} of the first matching {@code Registrar} will
* then be stored to the HTTP session. * then be stored to the HTTP session.
* <li>If it does exist, then we'll fetch the Registrar from the datastore to make sure access * <li>If it does exist, then we'll fetch the Registrar from Datastore to make sure access
* wasn't revoked. This should only cost one memcache read. * wasn't revoked. This should only cost one memcache read.
* </ul> * </ul>
* *
@@ -134,7 +134,7 @@ public class SessionUtils {
private static boolean hasAccessToRegistrar(String clientId, final String gaeUserId) { private static boolean hasAccessToRegistrar(String clientId, final String gaeUserId) {
Registrar registrar = Registrar.loadByClientId(clientId); Registrar registrar = Registrar.loadByClientId(clientId);
if (registrar == null) { if (registrar == null) {
logger.warningfmt("Registrar '%s' disappeared from the datastore!", clientId); logger.warningfmt("Registrar '%s' disappeared from Datastore!", clientId);
return false; return false;
} }
return hasAccessToRegistrar(registrar, gaeUserId); return hasAccessToRegistrar(registrar, gaeUserId);

@@ -113,7 +113,7 @@ public class CollectionUtils {
* Turns an empty collection into a null collection. * Turns an empty collection into a null collection.
* *
* <p>This is unwise in the general case (nulls are bad; empties are good) but occasionally needed * <p>This is unwise in the general case (nulls are bad; empties are good) but occasionally needed
* to cause JAXB not to emit a field, or to avoid saving something to datastore. The method name * to cause JAXB not to emit a field, or to avoid saving something to Datastore. The method name
* includes "force" to indicate that you should think twice before using it. * includes "force" to indicate that you should think twice before using it.
*/ */
@Nullable @Nullable
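The truncated method under that javadoc presumably reduces to a one-line null/empty collapse; a sketch of the likely shape:
public static <T, C extends Collection<T>> C forceEmptyToNull(@Nullable C collection) {
  // Collapse empty to null so JAXB and Datastore skip the field entirely.
  return (collection == null || collection.isEmpty()) ? null : collection;
}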

Some files were not shown because too many files have changed in this diff