Refer to Datastore everywhere correctly by its capitalized form

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=147479683
mcilwain 2017-02-14 09:11:30 -08:00 committed by Ben McIlwain
parent a8cf81bca2
commit cdadb54acd
123 changed files with 232 additions and 235 deletions


@@ -47,7 +47,7 @@ import org.joda.time.DateTime;
  * transaction throughput, while maintaining the ability to perform strongly-consistent ancestor
  * queries.
  *
- * @see <a href="https://cloud.google.com/appengine/articles/scaling/contention">Avoiding datastore
+ * @see <a href="https://cloud.google.com/appengine/articles/scaling/contention">Avoiding Datastore
  * contention</a>
  */
  @Entity
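The Javadoc above describes entity-group sharding: writes are spread across several parent keys so that concurrent transactions rarely contend on the same entity group, while a query rooted at any single parent (an ancestor query) stays strongly consistent. A minimal sketch of that pattern using Objectify, with an assumed ExampleBucket entity and shard count rather than this project's actual classes:

import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import java.util.concurrent.ThreadLocalRandom;

/** Hypothetical parent entity; each instance roots one entity group. */
@Entity
class ExampleBucket {
  @Id long bucketId;
}

final class BucketSharding {
  // Assumed shard count; a real deployment would make this configurable.
  private static final int NUM_BUCKETS = 10;

  /**
   * Picks a random bucket to parent a new write. Spreading writes over NUM_BUCKETS entity
   * groups multiplies transaction throughput; reads that need strong consistency can still
   * issue an ancestor query rooted at a single bucket key.
   */
  static Key<ExampleBucket> pickRandomBucket() {
    long id = ThreadLocalRandom.current().nextLong(1, NUM_BUCKETS + 1);
    return Key.create(ExampleBucket.class, id);
  }
}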


@@ -32,14 +32,14 @@ import java.util.Objects;
  import org.joda.time.DateTime;
  /**
- * Entity representing a point-in-time consistent view of datastore, based on commit logs.
+ * Entity representing a point-in-time consistent view of Datastore, based on commit logs.
  *
  * <p>Conceptually, this entity consists of two pieces of information: the checkpoint "wall" time
  * and a set of bucket checkpoint times. The former is the ID for this checkpoint (constrained
  * to be unique upon checkpoint creation) and also represents the approximate wall time of the
- * consistent datastore view this checkpoint represents. The latter is really a mapping from
+ * consistent Datastore view this checkpoint represents. The latter is really a mapping from
  * bucket ID to timestamp, where the timestamp dictates the upper bound (inclusive) on commit logs
- * from that bucket to include when restoring the datastore to this checkpoint.
+ * from that bucket to include when restoring Datastore to this checkpoint.
  */
  @Entity
  @NotBackedUp(reason = Reason.COMMIT_LOGS)
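The two pieces of information described in this Javadoc can be pictured as a small value object. The following sketch is illustrative only (it is not the real CommitLogCheckpoint, which is an Objectify entity), but it makes the inclusive-upper-bound semantics of the bucket timestamps concrete:

import com.google.common.collect.ImmutableSortedMap;
import org.joda.time.DateTime;

/** Simplified model of a checkpoint: a wall time plus per-bucket commit-log cutoffs. */
final class CheckpointSketch {

  /** Approximate wall time of the consistent view; doubles as the checkpoint's unique ID. */
  private final DateTime checkpointTime;

  /** Bucket ID -> latest commit-log timestamp (inclusive) to replay when restoring to this point. */
  private final ImmutableSortedMap<Integer, DateTime> bucketTimestamps;

  CheckpointSketch(DateTime checkpointTime, ImmutableSortedMap<Integer, DateTime> bucketTimestamps) {
    this.checkpointTime = checkpointTime;
    this.bucketTimestamps = bucketTimestamps;
  }

  /** Returns whether a commit log from {@code bucketId} falls within this checkpoint's view. */
  boolean includes(int bucketId, DateTime commitLogTime) {
    DateTime cutoff = bucketTimestamps.get(bucketId);
    return cutoff != null && !commitLogTime.isAfter(cutoff);  // inclusive upper bound
  }
}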


@@ -30,7 +30,7 @@ import java.util.Set;
  import org.joda.time.DateTime;
  /**
- * Archived datastore transaction that can be replayed.
+ * Archived Datastore transaction that can be replayed.
  *
  * <p>Entities of this kind are entity group sharded using a {@link CommitLogBucket} parent. Each
  * object that was saved during this transaction is stored in a {@link CommitLogMutation} child


@@ -62,14 +62,14 @@ public class CommitLogMutation extends ImmutableObject {
  * Returns a new mutation entity created from an @Entity ImmutableObject instance.
  *
  * <p>The mutation key is generated deterministically from the {@code entity} key. The object is
- * converted to a raw datastore Entity, serialized to bytes, and stored within the mutation.
+ * converted to a raw Datastore Entity, serialized to bytes, and stored within the mutation.
  */
  public static CommitLogMutation create(Key<CommitLogManifest> parent, Object entity) {
  return createFromRaw(parent, ofy().save().toEntity(entity));
  }
  /**
- * Returns a new mutation entity created from a raw datastore Entity instance.
+ * Returns a new mutation entity created from a raw Datastore Entity instance.
  *
  * <p>The mutation key is generated deterministically from the {@code entity} key. The Entity
  * itself is serialized to bytes and stored within the returned mutation.
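The convert-then-serialize step described here can be sketched with the low-level App Engine Datastore API. This is a hedged illustration of the mechanism, not the project's actual createFromRaw(); the helper names are assumptions, though EntityTranslator and KeyFactory are the standard SDK classes for this job:

import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.EntityTranslator;
import com.google.appengine.api.datastore.KeyFactory;

/** Sketch: serialize a raw Datastore Entity to bytes and derive a deterministic child ID. */
final class MutationSerializationSketch {

  /** Converts the raw entity to its protocol-buffer form and returns the serialized bytes. */
  static byte[] toBytes(Entity rawEntity) {
    return EntityTranslator.convertToPb(rawEntity).toByteArray();
  }

  /** Rebuilds the entity from bytes previously produced by {@link #toBytes}. */
  static Entity fromBytes(byte[] bytes) {
    return EntityTranslator.createFromPbBytes(bytes);
  }

  /**
   * Deterministic ID for the stored mutation: the websafe string form of the wrapped entity's
   * key, so the same entity key always yields the same mutation ID.
   */
  static String mutationId(Entity rawEntity) {
    return KeyFactory.keyToString(rawEntity.getKey());
  }
}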


@@ -102,8 +102,8 @@ public class ObjectifyService {
  @Override
  protected AsyncDatastoreService createRawAsyncDatastoreService(DatastoreServiceConfig cfg) {
- // In the unit test environment, wrap the datastore service in a proxy that can be used to
- // examine the number of requests sent to datastore.
+ // In the unit test environment, wrap the Datastore service in a proxy that can be used to
+ // examine the number of requests sent to Datastore.
  AsyncDatastoreService service = super.createRawAsyncDatastoreService(cfg);
  return RegistryEnvironment.get().equals(RegistryEnvironment.UNITTEST)
  ? new RequestCapturingAsyncDatastoreService(service)
@@ -134,7 +134,7 @@ public class ObjectifyService {
  }
  }
- /** Register classes that can be persisted via Objectify as datastore entities. */
+ /** Register classes that can be persisted via Objectify as Datastore entities. */
  private static void registerEntityClasses(
  Iterable<Class<? extends ImmutableObject>> entityClasses) {
  // Register all the @Entity classes before any @EntitySubclass classes so that we can check
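The first hunk above is plain decorator wiring: in the unit-test environment the raw AsyncDatastoreService is wrapped in a request-capturing proxy, otherwise it is returned untouched. As a generic, self-contained sketch of that idea (the real RequestCapturingAsyncDatastoreService implements the interface by hand rather than via reflection), a dynamic proxy that records every call before delegating might look like:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/** Generic sketch: wrap any service interface so each call is recorded before delegation. */
final class CallCapturingProxy {

  /** Names of methods invoked so far, shared across wrapped instances for test assertions. */
  static final List<String> capturedCalls = new CopyOnWriteArrayList<>();

  @SuppressWarnings("unchecked")
  static <T> T wrap(Class<T> serviceInterface, T delegate) {
    InvocationHandler handler = (proxy, method, args) -> {
      capturedCalls.add(method.getName());  // record the call, then pass it through unchanged
      try {
        return method.invoke(delegate, args);
      } catch (InvocationTargetException e) {
        throw e.getCause();  // rethrow the delegate's own exception, unwrapped
      }
    };
    return (T) Proxy.newProxyInstance(
        serviceInterface.getClassLoader(), new Class<?>[] {serviceInterface}, handler);
  }
}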


@@ -69,7 +69,7 @@ public class Ofy {
  *
  * <p>This value should used as a cache expiration time for any entities annotated with an
  * Objectify {@code @Cache} annotation, to put an upper bound on unlikely-but-possible divergence
- * between memcache and datastore when a memcache write fails.
+ * between memcache and Datastore when a memcache write fails.
  */
  public static final int RECOMMENDED_MEMCACHE_EXPIRATION = 3600;
@@ -230,7 +230,7 @@ public class Ofy {
  | DatastoreFailureException e) {
  // TransientFailureExceptions come from task queues and always mean nothing committed.
  // TimestampInversionExceptions are thrown by our code and are always retryable as well.
- // However, datastore exceptions might get thrown even if the transaction succeeded.
+ // However, Datastore exceptions might get thrown even if the transaction succeeded.
  if ((e instanceof DatastoreTimeoutException || e instanceof DatastoreFailureException)
  && checkIfAlreadySucceeded(work)) {
  return work.getResult();
@@ -255,10 +255,10 @@
  CommitLogManifest manifest = work.getManifest();
  if (manifest == null) {
  // Work ran but no commit log was created. This might mean that the transaction did not
- // write anything to datastore. We can safely retry because it only reads. (Although the
+ // write anything to Datastore. We can safely retry because it only reads. (Although the
  // transaction might have written a task to a queue, we consider that safe to retry too
  // since we generally assume that tasks might be doubly executed.) Alternatively it
- // might mean that the transaction wrote to datastore but turned off commit logs by
+ // might mean that the transaction wrote to Datastore but turned off commit logs by
  // exclusively using save/deleteWithoutBackups() rather than save/delete(). Although we
  // have no hard proof that retrying is safe, we use these methods judiciously and it is
  // reasonable to assume that if the transaction really did succeed that the retry will
@@ -300,7 +300,7 @@ public class Ofy {
  /**
  * Execute some work with a fresh session cache.
  *
- * <p>This is useful in cases where we want to load the latest possible data from datastore but
+ * <p>This is useful in cases where we want to load the latest possible data from Datastore but
  * don't need point-in-time consistency across loads and consequently don't need a transaction.
  * Note that unlike a transaction's fresh session cache, the contents of this cache will be
  * discarded once the work completes, rather than being propagated into the enclosing session.
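The middle hunks are the interesting part of the retry logic: DatastoreTimeoutException and DatastoreFailureException are ambiguous, so before rerunning the transaction the code checks (via the commit-log manifest) whether the previous attempt actually committed. A stripped-down sketch of that control flow, with an assumed Work interface standing in for the real transaction wrapper and success probe:

import com.google.appengine.api.datastore.DatastoreFailureException;
import com.google.appengine.api.datastore.DatastoreTimeoutException;

/** Sketch of retrying transactional work only when it is known not to have committed. */
final class RetryUnlessCommitted {

  /** Minimal stand-in for the real transactional work object (assumed shape). */
  interface Work<T> {
    T run();                     // executes the transaction and returns its result
    T getResult();               // result captured by the most recent run
    boolean alreadySucceeded();  // stand-in for the manifest-based "did it really commit?" probe
  }

  static <T> T execute(Work<T> work, int maxAttempts) {
    for (int attempt = 1; ; attempt++) {
      try {
        return work.run();
      } catch (DatastoreTimeoutException | DatastoreFailureException e) {
        // Ambiguous failure: the commit may have been applied even though an exception surfaced.
        if (work.alreadySucceeded()) {
          return work.getResult();
        }
        if (attempt >= maxAttempts) {
          throw e;
        }
        // Nothing committed, so rerunning the whole transaction is safe.
      }
    }
  }
}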


@@ -39,9 +39,9 @@ public class RequestCapturingAsyncDatastoreService implements AsyncDatastoreServ
  private final AsyncDatastoreService delegate;
- // Each outer lists represents datastore operations, with inner lists representing the keys or
+ // Each outer lists represents Datastore operations, with inner lists representing the keys or
  // entities involved in that operation. We use static lists because we care about overall calls to
- // datastore, not calls via a specific instance of the service.
+ // Datastore, not calls via a specific instance of the service.
  private static List<List<Key>> reads = synchronizedList(new ArrayList<List<Key>>());
  private static List<List<Key>> deletes = synchronizedList(new ArrayList<List<Key>>());
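The static, synchronized lists in this hunk are deliberate: they aggregate traffic across every instance of the service so a test can assert on total calls to Datastore. The bookkeeping alone, detached from the delegating methods, looks roughly like this (recordRead is an assumed helper, not part of the real class):

import static java.util.Collections.synchronizedList;

import com.google.appengine.api.datastore.Key;
import java.util.ArrayList;
import java.util.List;

/** Sketch of the capture bookkeeping; the real class also delegates every Datastore call. */
final class CaptureListsSketch {

  // Static so counts reflect all traffic to Datastore, not traffic through one instance.
  private static final List<List<Key>> reads = synchronizedList(new ArrayList<List<Key>>());

  /** Records one read operation; the inner list holds every key touched by that single call. */
  static void recordRead(List<Key> keysInThisCall) {
    reads.add(new ArrayList<>(keysInThisCall));
  }

  /** Returns the per-operation key lists captured so far. */
  static List<List<Key>> getReads() {
    return reads;
  }
}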


@@ -24,8 +24,8 @@ import java.util.Map;
  import org.joda.time.DateTime;
  /**
- * Exception when trying to write to the datastore with a timestamp that is inconsistent with
- * a partial ordering on transactions that touch the same entities.
+ * Exception when trying to write to Datastore with a timestamp that is inconsistent with a partial
+ * ordering on transactions that touch the same entities.
  */
  class TimestampInversionException extends RuntimeException {
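The invariant behind this exception is a timestamp-ordering rule: a transaction's write time should come after whatever timestamp was last committed for the entities it touches, otherwise replaying commit logs could reorder history. A minimal sketch of such a guard, where the strict-after rule and all names are assumptions rather than the project's actual check:

import java.util.Map;
import org.joda.time.DateTime;

/** Sketch of a timestamp-ordering guard like the one that raises TimestampInversionException. */
final class TimestampOrderingGuard {

  /**
   * Throws if {@code transactionTime} is not strictly after the last known update time of every
   * entity the transaction touches (keyed here by a plain String for simplicity). The real code
   * would throw TimestampInversionException rather than IllegalStateException.
   */
  static void checkOrdering(DateTime transactionTime, Map<String, DateTime> lastUpdateTimes) {
    for (Map.Entry<String, DateTime> entry : lastUpdateTimes.entrySet()) {
      if (!transactionTime.isAfter(entry.getValue())) {
        throw new IllegalStateException(String.format(
            "Timestamp inversion: transaction at %s touches %s last updated at %s",
            transactionTime, entry.getKey(), entry.getValue()));
      }
    }
  }
}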