initial commit
server/node_modules/@google-cloud/firestore/build/protos/admin_v1.json (1 line, generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
server/node_modules/@google-cloud/firestore/build/protos/firestore/bundle.proto (120 lines, generated, vendored, Normal file)
@@ -0,0 +1,120 @@
// Copyright 2020 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// This file defines the format of Firestore bundle file/stream. It is not a part of the
// Firestore API, only a specification used by Server and Client SDK to write and read
// bundles.

syntax = "proto3";

package firestore;

import "google/firestore/v1/document.proto";
import "google/firestore/v1/query.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Firestore.Proto";
option go_package = "google.golang.org/genproto/firestore/proto;firestore";
option java_multiple_files = true;
option java_outer_classname = "BundleProto";
option java_package = "com.google.firestore.proto";
option objc_class_prefix = "FSTPB";
option php_namespace = "Firestore\\Proto";

// Encodes a query saved in the bundle.
message BundledQuery {
  // The parent resource name.
  string parent = 1;

  // The query to run.
  oneof query_type {
    // A structured query.
    google.firestore.v1.StructuredQuery structured_query = 2;
  }

  // If the query is a limit query, should the limit be applied to the beginning or
  // the end of results.
  enum LimitType {
    FIRST = 0;
    LAST = 1;
  }
  LimitType limit_type = 3;
}

// A Query associated with a name, created as part of the bundle file, and can be read
// by client SDKs once the bundle containing them is loaded.
message NamedQuery {
  // Name of the query, such that client can use the name to load this query
  // from bundle, and resume from when the query results are materialized
  // into this bundle.
  string name = 1;

  // The query saved in the bundle.
  BundledQuery bundled_query = 2;

  // The read time of the query, when it is used to build the bundle. This is useful to
  // resume the query from the bundle, once it is loaded by client SDKs.
  google.protobuf.Timestamp read_time = 3;
}

// Metadata describing a Firestore document saved in the bundle.
message BundledDocumentMetadata {
  // The document key of a bundled document.
  string name = 1;

  // The snapshot version of the document data bundled.
  google.protobuf.Timestamp read_time = 2;

  // Whether the document exists.
  bool exists = 3;

  // The names of the queries in this bundle that this document matches to.
  repeated string queries = 4;
}

// Metadata describing the bundle file/stream.
message BundleMetadata {
  // The ID of the bundle.
  string id = 1;

  // Time at which the documents snapshot is taken for this bundle.
  google.protobuf.Timestamp create_time = 2;

  // The schema version of the bundle.
  uint32 version = 3;

  // The number of documents in the bundle.
  uint32 total_documents = 4;

  // The size of the bundle in bytes, excluding this `BundleMetadata`.
  uint64 total_bytes = 5;
}

// A Firestore bundle is a length-prefixed stream of JSON representations of
// `BundleElement`.
// Only one `BundleMetadata` is expected, and it should be the first element.
// The named queries follow after `metadata`. Every `document_metadata` is
// immediately followed by a `document`.
message BundleElement {
  oneof element_type {
    BundleMetadata metadata = 1;

    NamedQuery named_query = 2;

    BundledDocumentMetadata document_metadata = 3;

    google.firestore.v1.Document document = 4;
  }
}
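As an aside, the element ordering described in `BundleElement` can be sketched like this (illustrative only, not part of the vendored file; the bundle ID, query name, and document paths are made up, and a real bundle is a length-prefixed stream of the JSON encodings of these elements rather than the prototext shown here):

    # Four consecutive BundleElement values, in the order the comment above requires:
    metadata {
      id: "example-bundle"          # hypothetical bundle ID
      version: 1
      total_documents: 1
    }
    named_query {
      name: "latest-stories"        # hypothetical query name
      bundled_query {
        parent: "projects/example/databases/(default)/documents"
        limit_type: FIRST
      }
    }
    document_metadata {
      name: "projects/example/databases/(default)/documents/stories/story1"
      exists: true
      queries: "latest-stories"
    }
    document {
      name: "projects/example/databases/(default)/documents/stories/story1"
    }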
server/node_modules/@google-cloud/firestore/build/protos/firestore_admin_v1_proto_api.d.ts (9761 lines, generated, vendored, Normal file)
File diff suppressed because it is too large
server/node_modules/@google-cloud/firestore/build/protos/firestore_admin_v1_proto_api.js (1 line, generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
server/node_modules/@google-cloud/firestore/build/protos/firestore_v1_proto_api.d.ts (10576 lines, generated, vendored, Normal file)
File diff suppressed because it is too large
server/node_modules/@google-cloud/firestore/build/protos/firestore_v1_proto_api.js (1 line, generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
server/node_modules/@google-cloud/firestore/build/protos/firestore_v1beta1_proto_api.d.ts (9363 lines, generated, vendored, Normal file)
File diff suppressed because it is too large
server/node_modules/@google-cloud/firestore/build/protos/firestore_v1beta1_proto_api.js (1 line, generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
server/node_modules/@google-cloud/firestore/build/protos/google/api/annotations.proto (31 lines, generated, vendored, Normal file)
@@ -0,0 +1,31 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

import "google/api/http.proto";
import "google/protobuf/descriptor.proto";

option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "AnnotationsProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

extend google.protobuf.MethodOptions {
  // See `HttpRule`.
  HttpRule http = 72295728;
}
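For context, a minimal sketch of how the extension above is used (illustrative only, not part of the vendored file; the `Messaging` service mirrors the example given in google/api/http.proto, which is vendored later in this commit):

    syntax = "proto3";

    package example.v1;

    import "google/api/annotations.proto";

    message GetMessageRequest {
      string name = 1;  // Bound to the URL path by the rule below.
    }

    message Message {
      string text = 1;
    }

    service Messaging {
      rpc GetMessage(GetMessageRequest) returns (Message) {
        // Maps GET /v1/messages/* onto this RPC; see HttpRule in
        // google/api/http.proto for the full mapping semantics.
        option (google.api.http) = {
          get: "/v1/{name=messages/*}"
        };
      }
    }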
server/node_modules/@google-cloud/firestore/build/protos/google/api/client.proto (480 lines, generated, vendored, Normal file)
@@ -0,0 +1,480 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

import "google/api/launch_stage.proto";
import "google/protobuf/descriptor.proto";
import "google/protobuf/duration.proto";

option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "ClientProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

extend google.protobuf.MethodOptions {
  // A definition of a client library method signature.
  //
  // In client libraries, each proto RPC corresponds to one or more methods
  // which the end user is able to call, and calls the underlying RPC.
  // Normally, this method receives a single argument (a struct or instance
  // corresponding to the RPC request object). Defining this field will
  // add one or more overloads providing flattened or simpler method signatures
  // in some languages.
  //
  // The fields on the method signature are provided as a comma-separated
  // string.
  //
  // For example, the proto RPC and annotation:
  //
  //     rpc CreateSubscription(CreateSubscriptionRequest)
  //         returns (Subscription) {
  //       option (google.api.method_signature) = "name,topic";
  //     }
  //
  // Would add the following Java overload (in addition to the method accepting
  // the request object):
  //
  //     public final Subscription createSubscription(String name, String topic)
  //
  // The following backwards-compatibility guidelines apply:
  //
  // * Adding this annotation to an unannotated method is backwards
  //   compatible.
  // * Adding this annotation to a method which already has existing
  //   method signature annotations is backwards compatible if and only if
  //   the new method signature annotation is last in the sequence.
  // * Modifying or removing an existing method signature annotation is
  //   a breaking change.
  // * Re-ordering existing method signature annotations is a breaking
  //   change.
  repeated string method_signature = 1051;
}

extend google.protobuf.ServiceOptions {
  // The hostname for this service.
  // This should be specified with no prefix or protocol.
  //
  // Example:
  //
  //     service Foo {
  //       option (google.api.default_host) = "foo.googleapi.com";
  //       ...
  //     }
  string default_host = 1049;

  // OAuth scopes needed for the client.
  //
  // Example:
  //
  //     service Foo {
  //       option (google.api.oauth_scopes) = \
  //         "https://www.googleapis.com/auth/cloud-platform";
  //       ...
  //     }
  //
  // If there is more than one scope, use a comma-separated string:
  //
  // Example:
  //
  //     service Foo {
  //       option (google.api.oauth_scopes) = \
  //         "https://www.googleapis.com/auth/cloud-platform,"
  //         "https://www.googleapis.com/auth/monitoring";
  //       ...
  //     }
  string oauth_scopes = 1050;

  // The API version of this service, which should be sent by version-aware
  // clients to the service. This allows services to abide by the schema and
  // behavior of the service at the time this API version was deployed.
  // The format of the API version must be treated as opaque by clients.
  // Services may use a format with an apparent structure, but clients must
  // not rely on this to determine components within an API version, or attempt
  // to construct other valid API versions. Note that this is for upcoming
  // functionality and may not be implemented for all services.
  //
  // Example:
  //
  //     service Foo {
  //       option (google.api.api_version) = "v1_20230821_preview";
  //     }
  string api_version = 525000001;
}

// Required information for every language.
message CommonLanguageSettings {
  // Link to automatically generated reference documentation. Example:
  // https://cloud.google.com/nodejs/docs/reference/asset/latest
  string reference_docs_uri = 1 [deprecated = true];

  // The destination where API teams want this client library to be published.
  repeated ClientLibraryDestination destinations = 2;

  // Configuration for which RPCs should be generated in the GAPIC client.
  SelectiveGapicGeneration selective_gapic_generation = 3;
}

// Details about how and where to publish client libraries.
message ClientLibrarySettings {
  // Version of the API to apply these settings to. This is the full protobuf
  // package for the API, ending in the version element.
  // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1".
  string version = 1;

  // Launch stage of this version of the API.
  LaunchStage launch_stage = 2;

  // When using transport=rest, the client request will encode enums as
  // numbers rather than strings.
  bool rest_numeric_enums = 3;

  // Settings for legacy Java features, supported in the Service YAML.
  JavaSettings java_settings = 21;

  // Settings for C++ client libraries.
  CppSettings cpp_settings = 22;

  // Settings for PHP client libraries.
  PhpSettings php_settings = 23;

  // Settings for Python client libraries.
  PythonSettings python_settings = 24;

  // Settings for Node client libraries.
  NodeSettings node_settings = 25;

  // Settings for .NET client libraries.
  DotnetSettings dotnet_settings = 26;

  // Settings for Ruby client libraries.
  RubySettings ruby_settings = 27;

  // Settings for Go client libraries.
  GoSettings go_settings = 28;
}

// This message configures the settings for publishing [Google Cloud Client
// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries)
// generated from the service config.
message Publishing {
  // A list of API method settings, e.g. the behavior for methods that use the
  // long-running operation pattern.
  repeated MethodSettings method_settings = 2;

  // Link to a *public* URI where users can report issues. Example:
  // https://issuetracker.google.com/issues/new?component=190865&template=1161103
  string new_issue_uri = 101;

  // Link to product home page. Example:
  // https://cloud.google.com/asset-inventory/docs/overview
  string documentation_uri = 102;

  // Used as a tracking tag when collecting data about the APIs developer
  // relations artifacts like docs, packages delivered to package managers,
  // etc. Example: "speech".
  string api_short_name = 103;

  // GitHub label to apply to issues and pull requests opened for this API.
  string github_label = 104;

  // GitHub teams to be added to CODEOWNERS in the directory in GitHub
  // containing source code for the client libraries for this API.
  repeated string codeowner_github_teams = 105;

  // A prefix used in sample code when demarking regions to be included in
  // documentation.
  string doc_tag_prefix = 106;

  // For whom the client library is being published.
  ClientLibraryOrganization organization = 107;

  // Client library settings. If the same version string appears multiple
  // times in this list, then the last one wins. Settings from earlier
  // settings with the same version string are discarded.
  repeated ClientLibrarySettings library_settings = 109;

  // Optional link to proto reference documentation. Example:
  // https://cloud.google.com/pubsub/lite/docs/reference/rpc
  string proto_reference_documentation_uri = 110;

  // Optional link to REST reference documentation. Example:
  // https://cloud.google.com/pubsub/lite/docs/reference/rest
  string rest_reference_documentation_uri = 111;
}

// Settings for Java client libraries.
message JavaSettings {
  // The package name to use in Java. Clobbers the java_package option
  // set in the protobuf. This should be used **only** by APIs
  // who have already set the language_settings.java.package_name" field
  // in gapic.yaml. API teams should use the protobuf java_package option
  // where possible.
  //
  // Example of a YAML configuration::
  //
  //     publishing:
  //       java_settings:
  //         library_package: com.google.cloud.pubsub.v1
  string library_package = 1;

  // Configure the Java class name to use instead of the service's for its
  // corresponding generated GAPIC client. Keys are fully-qualified
  // service names as they appear in the protobuf (including the full
  // the language_settings.java.interface_names" field in gapic.yaml. API
  // teams should otherwise use the service name as it appears in the
  // protobuf.
  //
  // Example of a YAML configuration::
  //
  //     publishing:
  //       java_settings:
  //         service_class_names:
  //           - google.pubsub.v1.Publisher: TopicAdmin
  //           - google.pubsub.v1.Subscriber: SubscriptionAdmin
  map<string, string> service_class_names = 2;

  // Some settings.
  CommonLanguageSettings common = 3;
}

// Settings for C++ client libraries.
message CppSettings {
  // Some settings.
  CommonLanguageSettings common = 1;
}

// Settings for Php client libraries.
message PhpSettings {
  // Some settings.
  CommonLanguageSettings common = 1;
}

// Settings for Python client libraries.
message PythonSettings {
  // Experimental features to be included during client library generation.
  // These fields will be deprecated once the feature graduates and is enabled
  // by default.
  message ExperimentalFeatures {
    // Enables generation of asynchronous REST clients if `rest` transport is
    // enabled. By default, asynchronous REST clients will not be generated.
    // This feature will be enabled by default 1 month after launching the
    // feature in preview packages.
    bool rest_async_io_enabled = 1;

    // Enables generation of protobuf code using new types that are more
    // Pythonic which are included in `protobuf>=5.29.x`. This feature will be
    // enabled by default 1 month after launching the feature in preview
    // packages.
    bool protobuf_pythonic_types_enabled = 2;
  }

  // Some settings.
  CommonLanguageSettings common = 1;

  // Experimental features to be included during client library generation.
  ExperimentalFeatures experimental_features = 2;
}

// Settings for Node client libraries.
message NodeSettings {
  // Some settings.
  CommonLanguageSettings common = 1;
}

// Settings for Dotnet client libraries.
message DotnetSettings {
  // Some settings.
  CommonLanguageSettings common = 1;

  // Map from original service names to renamed versions.
  // This is used when the default generated types
  // would cause a naming conflict. (Neither name is
  // fully-qualified.)
  // Example: Subscriber to SubscriberServiceApi.
  map<string, string> renamed_services = 2;

  // Map from full resource types to the effective short name
  // for the resource. This is used when otherwise resource
  // named from different services would cause naming collisions.
  // Example entry:
  // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
  map<string, string> renamed_resources = 3;

  // List of full resource types to ignore during generation.
  // This is typically used for API-specific Location resources,
  // which should be handled by the generator as if they were actually
  // the common Location resources.
  // Example entry: "documentai.googleapis.com/Location"
  repeated string ignored_resources = 4;

  // Namespaces which must be aliased in snippets due to
  // a known (but non-generator-predictable) naming collision
  repeated string forced_namespace_aliases = 5;

  // Method signatures (in the form "service.method(signature)")
  // which are provided separately, so shouldn't be generated.
  // Snippets *calling* these methods are still generated, however.
  repeated string handwritten_signatures = 6;
}

// Settings for Ruby client libraries.
message RubySettings {
  // Some settings.
  CommonLanguageSettings common = 1;
}

// Settings for Go client libraries.
message GoSettings {
  // Some settings.
  CommonLanguageSettings common = 1;

  // Map of service names to renamed services. Keys are the package relative
  // service names and values are the name to be used for the service client
  // and call options.
  //
  //     publishing:
  //       go_settings:
  //         renamed_services:
  //           Publisher: TopicAdmin
  map<string, string> renamed_services = 2;
}

// Describes the generator configuration for a method.
message MethodSettings {
  // Describes settings to use when generating API methods that use the
  // long-running operation pattern.
  // All default values below are from those used in the client library
  // generators (e.g.
  // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
  message LongRunning {
    // Initial delay after which the first poll request will be made.
    // Default value: 5 seconds.
    google.protobuf.Duration initial_poll_delay = 1;

    // Multiplier to gradually increase delay between subsequent polls until it
    // reaches max_poll_delay.
    // Default value: 1.5.
    float poll_delay_multiplier = 2;

    // Maximum time between two subsequent poll requests.
    // Default value: 45 seconds.
    google.protobuf.Duration max_poll_delay = 3;

    // Total polling timeout.
    // Default value: 5 minutes.
    google.protobuf.Duration total_poll_timeout = 4;
  }

  // The fully qualified name of the method, for which the options below apply.
  // This is used to find the method to apply the options.
  //
  // Example:
  //
  //     publishing:
  //       method_settings:
  //       - selector: google.storage.control.v2.StorageControl.CreateFolder
  //         # method settings for CreateFolder...
  string selector = 1;

  // Describes settings to use for long-running operations when generating
  // API methods for RPCs. Complements RPCs that use the annotations in
  // google/longrunning/operations.proto.
  //
  // Example of a YAML configuration::
  //
  //     publishing:
  //       method_settings:
  //       - selector: google.cloud.speech.v2.Speech.BatchRecognize
  //         long_running:
  //           initial_poll_delay: 60s # 1 minute
  //           poll_delay_multiplier: 1.5
  //           max_poll_delay: 360s # 6 minutes
  //           total_poll_timeout: 54000s # 90 minutes
  LongRunning long_running = 2;

  // List of top-level fields of the request message, that should be
  // automatically populated by the client libraries based on their
  // (google.api.field_info).format. Currently supported format: UUID4.
  //
  // Example of a YAML configuration:
  //
  //     publishing:
  //       method_settings:
  //       - selector: google.example.v1.ExampleService.CreateExample
  //         auto_populated_fields:
  //         - request_id
  repeated string auto_populated_fields = 3;
}

// The organization for which the client libraries are being published.
// Affects the url where generated docs are published, etc.
enum ClientLibraryOrganization {
  // Not useful.
  CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0;

  // Google Cloud Platform Org.
  CLOUD = 1;

  // Ads (Advertising) Org.
  ADS = 2;

  // Photos Org.
  PHOTOS = 3;

  // Street View Org.
  STREET_VIEW = 4;

  // Shopping Org.
  SHOPPING = 5;

  // Geo Org.
  GEO = 6;

  // Generative AI - https://developers.generativeai.google
  GENERATIVE_AI = 7;
}

// To where should client libraries be published?
enum ClientLibraryDestination {
  // Client libraries will neither be generated nor published to package
  // managers.
  CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0;

  // Generate the client library in a repo under github.com/googleapis,
  // but don't publish it to package managers.
  GITHUB = 10;

  // Publish the library to package managers like nuget.org and npmjs.com.
  PACKAGE_MANAGER = 20;
}

// This message is used to configure the generation of a subset of the RPCs in
// a service for client libraries.
message SelectiveGapicGeneration {
  // An allowlist of the fully qualified names of RPCs that should be included
  // on public client surfaces.
  repeated string methods = 1;

  // Setting this to true indicates to the client generators that methods
  // that would be excluded from the generation should instead be generated
  // in a way that indicates these methods should not be consumed by
  // end users. How this is expressed is up to individual language
  // implementations to decide. Some examples may be: added annotations,
  // obfuscated identifiers, or other language idiomatic patterns.
  bool generate_omitted_as_internal = 2;
}
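Putting the service-level extensions above together, a typical annotated service looks roughly like this (a hedged sketch that merges the separate examples from the comments above; `library.googleapis.com`, `LibraryService`, and the messages are placeholders, not part of the vendored file):

    syntax = "proto3";

    package example.v1;

    import "google/api/client.proto";

    service LibraryService {
      // DNS name the generated clients connect to by default.
      option (google.api.default_host) = "library.googleapis.com";
      // OAuth scope(s) the generated clients request.
      option (google.api.oauth_scopes) =
          "https://www.googleapis.com/auth/cloud-platform";

      rpc GetBook(GetBookRequest) returns (Book) {
        // Lets generators emit a flattened overload such as getBook(name).
        option (google.api.method_signature) = "name";
      }
    }

    message GetBookRequest {
      string name = 1;
    }

    message Book {
      string name = 1;
      string title = 2;
    }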
server/node_modules/@google-cloud/firestore/build/protos/google/api/field_behavior.proto (104 lines, generated, vendored, Normal file)
@@ -0,0 +1,104 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

import "google/protobuf/descriptor.proto";

option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "FieldBehaviorProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

extend google.protobuf.FieldOptions {
  // A designation of a specific field behavior (required, output only, etc.)
  // in protobuf messages.
  //
  // Examples:
  //
  //     string name = 1 [(google.api.field_behavior) = REQUIRED];
  //     State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  //     google.protobuf.Duration ttl = 1
  //       [(google.api.field_behavior) = INPUT_ONLY];
  //     google.protobuf.Timestamp expire_time = 1
  //       [(google.api.field_behavior) = OUTPUT_ONLY,
  //        (google.api.field_behavior) = IMMUTABLE];
  repeated google.api.FieldBehavior field_behavior = 1052 [packed = false];
}

// An indicator of the behavior of a given field (for example, that a field
// is required in requests, or given as output but ignored as input).
// This **does not** change the behavior in protocol buffers itself; it only
// denotes the behavior and may affect how API tooling handles the field.
//
// Note: This enum **may** receive new values in the future.
enum FieldBehavior {
  // Conventional default for enums. Do not use this.
  FIELD_BEHAVIOR_UNSPECIFIED = 0;

  // Specifically denotes a field as optional.
  // While all fields in protocol buffers are optional, this may be specified
  // for emphasis if appropriate.
  OPTIONAL = 1;

  // Denotes a field as required.
  // This indicates that the field **must** be provided as part of the request,
  // and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
  REQUIRED = 2;

  // Denotes a field as output only.
  // This indicates that the field is provided in responses, but including the
  // field in a request does nothing (the server *must* ignore it and
  // *must not* throw an error as a result of the field's presence).
  OUTPUT_ONLY = 3;

  // Denotes a field as input only.
  // This indicates that the field is provided in requests, and the
  // corresponding field is not included in output.
  INPUT_ONLY = 4;

  // Denotes a field as immutable.
  // This indicates that the field may be set once in a request to create a
  // resource, but may not be changed thereafter.
  IMMUTABLE = 5;

  // Denotes that a (repeated) field is an unordered list.
  // This indicates that the service may provide the elements of the list
  // in any arbitrary order, rather than the order the user originally
  // provided. Additionally, the list's order may or may not be stable.
  UNORDERED_LIST = 6;

  // Denotes that this field returns a non-empty default value if not set.
  // This indicates that if the user provides the empty value in a request,
  // a non-empty value will be returned. The user will not be aware of what
  // non-empty value to expect.
  NON_EMPTY_DEFAULT = 7;

  // Denotes that the field in a resource (a message annotated with
  // google.api.resource) is used in the resource name to uniquely identify the
  // resource. For AIP-compliant APIs, this should only be applied to the
  // `name` field on the resource.
  //
  // This behavior should not be applied to references to other resources within
  // the message.
  //
  // The identifier field of resources often have different field behavior
  // depending on the request it is embedded in (e.g. for Create methods name
  // is optional and unused, while for Update methods it is required). Instead
  // of method-specific annotations, only `IDENTIFIER` is required.
  IDENTIFIER = 8;
}
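A short usage sketch of the annotation defined above (illustrative only, not part of the vendored file; the `Book` resource and its fields are placeholders):

    syntax = "proto3";

    package example.v1;

    import "google/api/field_behavior.proto";
    import "google/protobuf/timestamp.proto";

    message Book {
      // Resource name; identifies the resource rather than being set freely.
      string name = 1 [(google.api.field_behavior) = IDENTIFIER];

      // Must be supplied by the caller on create requests.
      string title = 2 [(google.api.field_behavior) = REQUIRED];

      // Populated by the server; ignored if present in a request.
      google.protobuf.Timestamp create_time = 3
          [(google.api.field_behavior) = OUTPUT_ONLY];
    }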
server/node_modules/@google-cloud/firestore/build/protos/google/api/http.proto (371 lines, generated, vendored, Normal file)
@@ -0,0 +1,371 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

option cc_enable_arenas = true;
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "HttpProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

// Defines the HTTP configuration for an API service. It contains a list of
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
// to one or more HTTP REST API methods.
message Http {
  // A list of HTTP configuration rules that apply to individual API methods.
  //
  // **NOTE:** All service configuration rules follow "last one wins" order.
  repeated HttpRule rules = 1;

  // When set to true, URL path parameters will be fully URI-decoded except in
  // cases of single segment matches in reserved expansion, where "%2F" will be
  // left encoded.
  //
  // The default behavior is to not decode RFC 6570 reserved characters in multi
  // segment matches.
  bool fully_decode_reserved_expansion = 2;
}

// gRPC Transcoding
//
// gRPC Transcoding is a feature for mapping between a gRPC method and one or
// more HTTP REST endpoints. It allows developers to build a single API service
// that supports both gRPC APIs and REST APIs. Many systems, including [Google
// APIs](https://github.com/googleapis/googleapis),
// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
// and use it for large scale production services.
//
// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
// how different portions of the gRPC request message are mapped to the URL
// path, URL query parameters, and HTTP request body. It also controls how the
// gRPC response message is mapped to the HTTP response body. `HttpRule` is
// typically specified as an `google.api.http` annotation on the gRPC method.
//
// Each mapping specifies a URL path template and an HTTP method. The path
// template may refer to one or more fields in the gRPC request message, as long
// as each field is a non-repeated field with a primitive (non-message) type.
// The path template controls how fields of the request message are mapped to
// the URL path.
//
// Example:
//
//     service Messaging {
//       rpc GetMessage(GetMessageRequest) returns (Message) {
//         option (google.api.http) = {
//             get: "/v1/{name=messages/*}"
//         };
//       }
//     }
//     message GetMessageRequest {
//       string name = 1; // Mapped to URL path.
//     }
//     message Message {
//       string text = 1; // The resource content.
//     }
//
// This enables an HTTP REST to gRPC mapping as below:
//
// - HTTP: `GET /v1/messages/123456`
// - gRPC: `GetMessage(name: "messages/123456")`
//
// Any fields in the request message which are not bound by the path template
// automatically become HTTP query parameters if there is no HTTP request body.
// For example:
//
//     service Messaging {
//       rpc GetMessage(GetMessageRequest) returns (Message) {
//         option (google.api.http) = {
//             get:"/v1/messages/{message_id}"
//         };
//       }
//     }
//     message GetMessageRequest {
//       message SubMessage {
//         string subfield = 1;
//       }
//       string message_id = 1; // Mapped to URL path.
//       int64 revision = 2;    // Mapped to URL query parameter `revision`.
//       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
//     }
//
// This enables a HTTP JSON to RPC mapping as below:
//
// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
//   SubMessage(subfield: "foo"))`
//
// Note that fields which are mapped to URL query parameters must have a
// primitive type or a repeated primitive type or a non-repeated message type.
// In the case of a repeated type, the parameter can be repeated in the URL
// as `...?param=A&param=B`. In the case of a message type, each field of the
// message is mapped to a separate parameter, such as
// `...?foo.a=A&foo.b=B&foo.c=C`.
//
// For HTTP methods that allow a request body, the `body` field
// specifies the mapping. Consider a REST update method on the
// message resource collection:
//
//     service Messaging {
//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
//         option (google.api.http) = {
//           patch: "/v1/messages/{message_id}"
//           body: "message"
//         };
//       }
//     }
//     message UpdateMessageRequest {
//       string message_id = 1; // mapped to the URL
//       Message message = 2;   // mapped to the body
//     }
//
// The following HTTP JSON to RPC mapping is enabled, where the
// representation of the JSON in the request body is determined by
// protos JSON encoding:
//
// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
//
// The special name `*` can be used in the body mapping to define that
// every field not bound by the path template should be mapped to the
// request body. This enables the following alternative definition of
// the update method:
//
//     service Messaging {
//       rpc UpdateMessage(Message) returns (Message) {
//         option (google.api.http) = {
//           patch: "/v1/messages/{message_id}"
//           body: "*"
//         };
//       }
//     }
//     message Message {
//       string message_id = 1;
//       string text = 2;
//     }
//
//
// The following HTTP JSON to RPC mapping is enabled:
//
// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
//
// Note that when using `*` in the body mapping, it is not possible to
// have HTTP parameters, as all fields not bound by the path end in
// the body. This makes this option more rarely used in practice when
// defining REST APIs. The common usage of `*` is in custom methods
// which don't use the URL at all for transferring data.
//
// It is possible to define multiple HTTP methods for one RPC by using
// the `additional_bindings` option. Example:
//
//     service Messaging {
//       rpc GetMessage(GetMessageRequest) returns (Message) {
//         option (google.api.http) = {
//           get: "/v1/messages/{message_id}"
//           additional_bindings {
//             get: "/v1/users/{user_id}/messages/{message_id}"
//           }
//         };
//       }
//     }
//     message GetMessageRequest {
//       string message_id = 1;
//       string user_id = 2;
//     }
//
// This enables the following two alternative HTTP JSON to RPC mappings:
//
// - HTTP: `GET /v1/messages/123456`
// - gRPC: `GetMessage(message_id: "123456")`
//
// - HTTP: `GET /v1/users/me/messages/123456`
// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
//
// Rules for HTTP mapping
//
// 1. Leaf request fields (recursive expansion nested messages in the request
//    message) are classified into three categories:
//    - Fields referred by the path template. They are passed via the URL path.
//    - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They
//      are passed via the HTTP
//      request body.
//    - All other fields are passed via the URL query parameters, and the
//      parameter name is the field path in the request message. A repeated
//      field can be represented as multiple query parameters under the same
//      name.
// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL
//    query parameter, all fields
//    are passed via URL path and HTTP request body.
// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP
//    request body, all
//    fields are passed via URL path and URL query parameters.
//
// Path template syntax
//
//     Template = "/" Segments [ Verb ] ;
//     Segments = Segment { "/" Segment } ;
//     Segment  = "*" | "**" | LITERAL | Variable ;
//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
//     FieldPath = IDENT { "." IDENT } ;
//     Verb     = ":" LITERAL ;
//
// The syntax `*` matches a single URL path segment. The syntax `**` matches
// zero or more URL path segments, which must be the last part of the URL path
// except the `Verb`.
//
// The syntax `Variable` matches part of the URL path as specified by its
// template. A variable template must not contain other variables. If a variable
// matches a single path segment, its template may be omitted, e.g. `{var}`
// is equivalent to `{var=*}`.
//
// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
// contains any reserved character, such characters should be percent-encoded
// before the matching.
//
// If a variable contains exactly one path segment, such as `"{var}"` or
// `"{var=*}"`, when such a variable is expanded into a URL path on the client
// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
// server side does the reverse decoding. Such variables show up in the
// [Discovery
// Document](https://developers.google.com/discovery/v1/reference/apis) as
// `{var}`.
//
// If a variable contains multiple path segments, such as `"{var=foo/*}"`
// or `"{var=**}"`, when such a variable is expanded into a URL path on the
// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
// The server side does the reverse decoding, except "%2F" and "%2f" are left
// unchanged. Such variables show up in the
// [Discovery
// Document](https://developers.google.com/discovery/v1/reference/apis) as
// `{+var}`.
//
// Using gRPC API Service Configuration
//
// gRPC API Service Configuration (service config) is a configuration language
// for configuring a gRPC service to become a user-facing product. The
// service config is simply the YAML representation of the `google.api.Service`
// proto message.
//
// As an alternative to annotating your proto file, you can configure gRPC
// transcoding in your service config YAML files. You do this by specifying a
// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
// effect as the proto annotation. This can be particularly useful if you
// have a proto that is reused in multiple services. Note that any transcoding
// specified in the service config will override any matching transcoding
// configuration in the proto.
//
// The following example selects a gRPC method and applies an `HttpRule` to it:
//
//     http:
//       rules:
//         - selector: example.v1.Messaging.GetMessage
//           get: /v1/messages/{message_id}/{sub.subfield}
//
// Special notes
//
// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
// proto to JSON conversion must follow the [proto3
// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
//
// While the single segment variable follows the semantics of
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
// Expansion, the multi segment variable **does not** follow RFC 6570 Section
// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
// does not expand special characters like `?` and `#`, which would lead
// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding
// for multi segment variables.
//
// The path variables **must not** refer to any repeated or mapped field,
// because client libraries are not capable of handling such variable expansion.
//
// The path variables **must not** capture the leading "/" character. The reason
// is that the most common use case "{var}" does not capture the leading "/"
// character. For consistency, all path variables must share the same behavior.
//
// Repeated message fields must not be mapped to URL query parameters, because
// no client library can support such complicated mapping.
//
// If an API needs to use a JSON array for request or response body, it can map
// the request or response body to a repeated field. However, some gRPC
// Transcoding implementations may not support this feature.
message HttpRule {
  // Selects a method to which this rule applies.
  //
  // Refer to [selector][google.api.DocumentationRule.selector] for syntax
  // details.
  string selector = 1;

  // Determines the URL pattern is matched by this rules. This pattern can be
  // used with any of the {get|put|post|delete|patch} methods. A custom method
  // can be defined using the 'custom' field.
  oneof pattern {
    // Maps to HTTP GET. Used for listing and getting information about
    // resources.
    string get = 2;

    // Maps to HTTP PUT. Used for replacing a resource.
    string put = 3;

    // Maps to HTTP POST. Used for creating a resource or performing an action.
    string post = 4;

    // Maps to HTTP DELETE. Used for deleting a resource.
    string delete = 5;

    // Maps to HTTP PATCH. Used for updating a resource.
    string patch = 6;

    // The custom pattern is used for specifying an HTTP method that is not
    // included in the `pattern` field, such as HEAD, or "*" to leave the
    // HTTP method unspecified for this rule. The wild-card rule is useful
    // for services that provide content to Web (HTML) clients.
    CustomHttpPattern custom = 8;
  }

  // The name of the request field whose value is mapped to the HTTP request
  // body, or `*` for mapping all request fields not captured by the path
  // pattern to the HTTP body, or omitted for not having any HTTP request body.
  //
  // NOTE: the referred field must be present at the top-level of the request
  // message type.
  string body = 7;

  // Optional. The name of the response field whose value is mapped to the HTTP
  // response body. When omitted, the entire response message will be used
  // as the HTTP response body.
  //
  // NOTE: The referred field must be present at the top-level of the response
  // message type.
  string response_body = 12;

  // Additional HTTP bindings for the selector. Nested bindings must
  // not contain an `additional_bindings` field themselves (that is,
  // the nesting may only be one level deep).
  repeated HttpRule additional_bindings = 11;
}

// A custom pattern is used for defining custom HTTP verb.
message CustomHttpPattern {
  // The name of this custom HTTP verb.
  string kind = 1;

  // The path matched by this custom verb.
  string path = 2;
}
server/node_modules/@google-cloud/firestore/build/protos/google/api/launch_stage.proto (72 lines, generated, vendored, Normal file)
@@ -0,0 +1,72 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

option go_package = "google.golang.org/genproto/googleapis/api;api";
option java_multiple_files = true;
option java_outer_classname = "LaunchStageProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

// The launch stage as defined by [Google Cloud Platform
// Launch Stages](https://cloud.google.com/terms/launch-stages).
enum LaunchStage {
  // Do not use this default value.
  LAUNCH_STAGE_UNSPECIFIED = 0;

  // The feature is not yet implemented. Users can not use it.
  UNIMPLEMENTED = 6;

  // Prelaunch features are hidden from users and are only visible internally.
  PRELAUNCH = 7;

  // Early Access features are limited to a closed group of testers. To use
  // these features, you must sign up in advance and sign a Trusted Tester
  // agreement (which includes confidentiality provisions). These features may
  // be unstable, changed in backward-incompatible ways, and are not
  // guaranteed to be released.
  EARLY_ACCESS = 1;

  // Alpha is a limited availability test for releases before they are cleared
  // for widespread use. By Alpha, all significant design issues are resolved
  // and we are in the process of verifying functionality. Alpha customers
  // need to apply for access, agree to applicable terms, and have their
  // projects allowlisted. Alpha releases don't have to be feature complete,
  // no SLAs are provided, and there are no technical support obligations, but
  // they will be far enough along that customers can actually use them in
  // test environments or for limited-use tests -- just like they would in
  // normal production cases.
  ALPHA = 2;

  // Beta is the point at which we are ready to open a release for any
  // customer to use. There are no SLA or technical support obligations in a
  // Beta release. Products will be complete from a feature perspective, but
  // may have some open outstanding issues. Beta releases are suitable for
  // limited production use cases.
  BETA = 3;

  // GA features are open to all developers and are considered stable and
  // fully qualified for production use.
  GA = 4;

  // Deprecated features are scheduled to be shut down and removed. For more
  // information, see the "Deprecation Policy" section of our [Terms of
  // Service](https://cloud.google.com/terms/)
  // and the [Google Cloud Platform Subject to the Deprecation
  // Policy](https://cloud.google.com/terms/deprecation) documentation.
  DEPRECATED = 5;
}
server/node_modules/@google-cloud/firestore/build/protos/google/api/resource.proto (243 lines, generated, vendored, Normal file)
@@ -0,0 +1,243 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.api;

import "google/protobuf/descriptor.proto";

option cc_enable_arenas = true;
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "ResourceProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";

extend google.protobuf.FieldOptions {
  // An annotation that describes a resource reference, see
  // [ResourceReference][].
  google.api.ResourceReference resource_reference = 1055;
}

extend google.protobuf.FileOptions {
  // An annotation that describes a resource definition without a corresponding
  // message; see [ResourceDescriptor][].
  repeated google.api.ResourceDescriptor resource_definition = 1053;
}

extend google.protobuf.MessageOptions {
  // An annotation that describes a resource definition, see
  // [ResourceDescriptor][].
  google.api.ResourceDescriptor resource = 1053;
}

// A simple descriptor of a resource type.
//
// ResourceDescriptor annotates a resource message (either by means of a
// protobuf annotation or use in the service config), and associates the
// resource's schema, the resource type, and the pattern of the resource name.
//
// Example:
//
//     message Topic {
//       // Indicates this message defines a resource schema.
//       // Declares the resource type in the format of {service}/{kind}.
//       // For Kubernetes resources, the format is {api group}/{kind}.
//       option (google.api.resource) = {
//         type: "pubsub.googleapis.com/Topic"
//         pattern: "projects/{project}/topics/{topic}"
//       };
//     }
//
// The ResourceDescriptor Yaml config will look like:
//
//     resources:
//     - type: "pubsub.googleapis.com/Topic"
//       pattern: "projects/{project}/topics/{topic}"
//
// Sometimes, resources have multiple patterns, typically because they can
// live under multiple parents.
//
// Example:
//
//     message LogEntry {
//       option (google.api.resource) = {
//         type: "logging.googleapis.com/LogEntry"
//         pattern: "projects/{project}/logs/{log}"
//         pattern: "folders/{folder}/logs/{log}"
//         pattern: "organizations/{organization}/logs/{log}"
//         pattern: "billingAccounts/{billing_account}/logs/{log}"
//       };
//     }
//
// The ResourceDescriptor Yaml config will look like:
//
//     resources:
//     - type: 'logging.googleapis.com/LogEntry'
//       pattern: "projects/{project}/logs/{log}"
//       pattern: "folders/{folder}/logs/{log}"
//       pattern: "organizations/{organization}/logs/{log}"
//       pattern: "billingAccounts/{billing_account}/logs/{log}"
message ResourceDescriptor {
  // A description of the historical or future-looking state of the
  // resource pattern.
  enum History {
    // The "unset" value.
    HISTORY_UNSPECIFIED = 0;

    // The resource originally had one pattern and launched as such, and
    // additional patterns were added later.
    ORIGINALLY_SINGLE_PATTERN = 1;

    // The resource has one pattern, but the API owner expects to add more
    // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
    // that from being necessary once there are multiple patterns.)
    FUTURE_MULTI_PATTERN = 2;
  }

  // A flag representing a specific style that a resource claims to conform to.
  enum Style {
    // The unspecified value. Do not use.
    STYLE_UNSPECIFIED = 0;

    // This resource is intended to be "declarative-friendly".
    //
    // Declarative-friendly resources must be more strictly consistent, and
    // setting this to true communicates to tools that this resource should
    // adhere to declarative-friendly expectations.
    //
    // Note: This is used by the API linter (linter.aip.dev) to enable
    // additional checks.
    DECLARATIVE_FRIENDLY = 1;
  }

  // The resource type. It must be in the format of
  // {service_name}/{resource_type_kind}. The `resource_type_kind` must be
  // singular and must not include version numbers.
  //
  // Example: `storage.googleapis.com/Bucket`
  //
  // The value of the resource_type_kind must follow the regular expression
  // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
  // should use PascalCase (UpperCamelCase). The maximum number of
  // characters allowed for the `resource_type_kind` is 100.
  string type = 1;

  // Optional. The relative resource name pattern associated with this resource
  // type. The DNS prefix of the full resource name shouldn't be specified here.
  //
  // The path pattern must follow the syntax, which aligns with HTTP binding
  // syntax:
  //
  //     Template = Segment { "/" Segment } ;
  //     Segment = LITERAL | Variable ;
  //     Variable = "{" LITERAL "}" ;
  //
  // Examples:
  //
  //     - "projects/{project}/topics/{topic}"
  //     - "projects/{project}/knowledgeBases/{knowledge_base}"
  //
  // The components in braces correspond to the IDs for each resource in the
  // hierarchy. It is expected that, if multiple patterns are provided,
  // the same component name (e.g. "project") refers to IDs of the same
  // type of resource.
  repeated string pattern = 2;

  // Optional. The field on the resource that designates the resource name
  // field. If omitted, this is assumed to be "name".
  string name_field = 3;

  // Optional. The historical or future-looking state of the resource pattern.
  //
  // Example:
  //
  //     // The InspectTemplate message originally only supported resource
  //     // names with organization, and project was added later.
  //     message InspectTemplate {
  //       option (google.api.resource) = {
  //         type: "dlp.googleapis.com/InspectTemplate"
  //         pattern:
  //         "organizations/{organization}/inspectTemplates/{inspect_template}"
  //         pattern: "projects/{project}/inspectTemplates/{inspect_template}"
  //         history: ORIGINALLY_SINGLE_PATTERN
  //       };
  //     }
  History history = 4;

  // The plural name used in the resource name and permission names, such as
  // 'projects' for the resource name of 'projects/{project}' and the permission
  // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
  // to this is for Nested Collections that have stuttering names, as defined
  // in [AIP-122](https://google.aip.dev/122#nested-collections), where the
  // collection ID in the resource name pattern does not necessarily directly
  // match the `plural` value.
  //
  // It is the same concept as the `plural` field in the k8s CRD spec
  // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
  //
  // Note: The plural form is required even for singleton resources. See
  // https://aip.dev/156
  string plural = 5;

  // The same concept as the `singular` field in the k8s CRD spec
  // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
  // Such as "project" for the `resourcemanager.googleapis.com/Project` type.
  string singular = 6;

  // Style flag(s) for this resource.
  // These indicate that a resource is expected to conform to a given
  // style. See the specific style flags for additional information.
  repeated Style style = 10;
}
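The pattern grammar above is plain template expansion over "/"-separated segments. As a purely illustrative sketch (TypeScript, not part of this package; the helper name is hypothetical), a pattern such as "projects/{project}/topics/{topic}" can be expanded from a map of IDs:

// Illustrative sketch: expand a google.api.resource pattern from a map of IDs.
// The pattern string and segment names come from the ResourceDescriptor above;
// the expandPattern helper itself is hypothetical and not part of this package.
function expandPattern(pattern: string, ids: Record<string, string>): string {
  return pattern.replace(/\{([^}]+)\}/g, (_, name: string) => {
    const value = ids[name];
    if (value === undefined) {
      throw new Error(`missing id for segment {${name}}`);
    }
    return value;
  });
}

// Example: "projects/my-project/topics/my-topic"
const topicName = expandPattern("projects/{project}/topics/{topic}", {
  project: "my-project",
  topic: "my-topic",
});
console.log(topicName);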

// Defines a proto annotation that describes a string field that refers to
// an API resource.
message ResourceReference {
  // The resource type that the annotated field references.
  //
  // Example:
  //
  //     message Subscription {
  //       string topic = 2 [(google.api.resource_reference) = {
  //         type: "pubsub.googleapis.com/Topic"
  //       }];
  //     }
  //
  // Occasionally, a field may reference an arbitrary resource. In this case,
  // APIs use the special value * in their resource reference.
  //
  // Example:
  //
  //     message GetIamPolicyRequest {
  //       string resource = 2 [(google.api.resource_reference) = {
  //         type: "*"
  //       }];
  //     }
  string type = 1;

  // The resource type of a child collection that the annotated field
  // references. This is useful for annotating the `parent` field that
  // doesn't have a fixed resource type.
  //
  // Example:
  //
  //     message ListLogEntriesRequest {
  //       string parent = 1 [(google.api.resource_reference) = {
  //         child_type: "logging.googleapis.com/LogEntry"
  //       }];
  //     }
  string child_type = 2;
}
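A referenced resource name can be checked against the patterns declared for its type. This is only an illustrative sketch (TypeScript); the pattern-to-regex conversion is an assumption for demonstration and not part of this package:

// Sketch: check that a resource name matches one of the patterns declared for
// a referenced type (e.g. a `parent` field annotated with child_type).
function matchesPattern(name: string, pattern: string): boolean {
  const regex = new RegExp("^" + pattern.replace(/\{[^}]+\}/g, "[^/]+") + "$");
  return regex.test(name);
}

// "projects/{project}/logs/{log}" is one of the LogEntry patterns shown above.
console.log(matchesPattern("projects/p1/logs/l1", "projects/{project}/logs/{log}")); // true
console.log(matchesPattern("projects/p1", "projects/{project}/logs/{log}"));         // false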
107 server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/backup.proto generated vendored Normal file
@@ -0,0 +1,107 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.admin.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
option java_multiple_files = true;
option java_outer_classname = "BackupProto";
option java_package = "com.google.firestore.admin.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
option ruby_package = "Google::Cloud::Firestore::Admin::V1";

// A Backup of a Cloud Firestore Database.
//
// The backup contains all documents and index configurations for the given
// database at a specific point in time.
message Backup {
  option (google.api.resource) = {
    type: "firestore.googleapis.com/Backup"
    pattern: "projects/{project}/locations/{location}/backups/{backup}"
  };

  // Backup specific statistics.
  message Stats {
    // Output only. Summation of the size of all documents and index entries in
    // the backup, measured in bytes.
    int64 size_bytes = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. The total number of documents contained in the backup.
    int64 document_count = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. The total number of index entries contained in the backup.
    int64 index_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Indicates the current state of the backup.
  enum State {
    // The state is unspecified.
    STATE_UNSPECIFIED = 0;

    // The pending backup is still being created. Operations on the
    // backup will be rejected in this state.
    CREATING = 1;

    // The backup is complete and ready to use.
    READY = 2;

    // The backup is not available at this moment.
    NOT_AVAILABLE = 3;
  }

  // Output only. The unique resource name of the Backup.
  //
  // Format is `projects/{project}/locations/{location}/backups/{backup}`.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Name of the Firestore database that the backup is from.
  //
  // Format is `projects/{project}/databases/{database}`.
  string database = 2 [
    (google.api.field_behavior) = OUTPUT_ONLY,
    (google.api.resource_reference) = {
      type: "firestore.googleapis.com/Database"
    }
  ];

  // Output only. The system-generated UUID4 for the Firestore database that the
  // backup is from.
  string database_uid = 7 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The backup contains an externally consistent copy of the
  // database at this time.
  google.protobuf.Timestamp snapshot_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The timestamp at which this backup expires.
  google.protobuf.Timestamp expire_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics about the backup.
  //
  // This data only becomes available after the backup is fully materialized to
  // secondary storage. This field will be empty until then.
  Stats stats = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The current state of the backup.
  State state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
}
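A minimal usage sketch for the message above (TypeScript). It assumes the admin client generated from this package exposes the ListBackups RPC, declared later in firestore_admin.proto, under the conventional camelCase name listBackups, and that response fields follow the generated camelCase naming; treat those names as assumptions, not guarantees.

// Sketch only: list backups and log the fields defined by the Backup message.
import {v1} from '@google-cloud/firestore';

async function showBackups(project: string, location: string): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const [response] = await admin.listBackups({
    parent: `projects/${project}/locations/${location}`,
  });
  for (const backup of response.backups ?? []) {
    // name, database, snapshotTime, expireTime, stats and state mirror the proto fields.
    console.log(backup.name, backup.state, backup.stats?.documentCount);
  }
}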
321 server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/database.proto generated vendored Normal file
@@ -0,0 +1,321 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.admin.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
option java_multiple_files = true;
option java_outer_classname = "DatabaseProto";
option java_package = "com.google.firestore.admin.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
option ruby_package = "Google::Cloud::Firestore::Admin::V1";
option (google.api.resource_definition) = {
  type: "firestore.googleapis.com/Operation"
  pattern: "projects/{project}/databases/{database}/operations/{operation}"
};

// A Cloud Firestore Database.
message Database {
  option (google.api.resource) = {
    type: "firestore.googleapis.com/Database"
    pattern: "projects/{project}/databases/{database}"
    style: DECLARATIVE_FRIENDLY
  };

  // The type of the database.
  // See https://cloud.google.com/datastore/docs/firestore-or-datastore for
  // information about how to choose.
  //
  // Mode changes are only allowed if the database is empty.
  enum DatabaseType {
    // Not used.
    DATABASE_TYPE_UNSPECIFIED = 0;

    // Firestore Native Mode
    FIRESTORE_NATIVE = 1;

    // Firestore in Datastore Mode.
    DATASTORE_MODE = 2;
  }

  // The type of concurrency control mode for transactions.
  enum ConcurrencyMode {
    // Not used.
    CONCURRENCY_MODE_UNSPECIFIED = 0;

    // Use optimistic concurrency control by default. This mode is available
    // for Cloud Firestore databases.
    OPTIMISTIC = 1;

    // Use pessimistic concurrency control by default. This mode is available
    // for Cloud Firestore databases.
    //
    // This is the default setting for Cloud Firestore.
    PESSIMISTIC = 2;

    // Use optimistic concurrency control with entity groups by default.
    //
    // This is the only available mode for Cloud Datastore.
    //
    // This mode is also available for Cloud Firestore with Datastore Mode but
    // is not recommended.
    OPTIMISTIC_WITH_ENTITY_GROUPS = 3;
  }

  // Point In Time Recovery feature enablement.
  enum PointInTimeRecoveryEnablement {
    // Not used.
    POINT_IN_TIME_RECOVERY_ENABLEMENT_UNSPECIFIED = 0;

    // Reads are supported on selected versions of the data from within the past
    // 7 days:
    //
    // * Reads against any timestamp within the past hour
    // * Reads against 1-minute snapshots beyond 1 hour and within 7 days
    //
    // `version_retention_period` and `earliest_version_time` can be
    // used to determine the supported versions.
    POINT_IN_TIME_RECOVERY_ENABLED = 1;

    // Reads are supported on any version of the data from within the past 1
    // hour.
    POINT_IN_TIME_RECOVERY_DISABLED = 2;
  }
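The enum above fixes the read window a client can target. As a rough, illustrative client-side check (TypeScript; the 7-day/1-hour limits come from the comments above, while the whole-minute snapshot heuristic is my interpretation and the server remains the source of truth):

// Sketch: rough check of whether a requested readTime falls inside the window
// described above (ENABLED => 7 days with 1-minute snapshots past the first
// hour, otherwise 1 hour). Illustrative only.
type PitrEnablement =
  | 'POINT_IN_TIME_RECOVERY_ENABLED'
  | 'POINT_IN_TIME_RECOVERY_DISABLED';

function readTimeLooksValid(readTime: Date, enablement: PitrEnablement, now = new Date()): boolean {
  const ageMs = now.getTime() - readTime.getTime();
  if (ageMs < 0) return false; // cannot read the future
  const hourMs = 60 * 60 * 1000;
  if (enablement !== 'POINT_IN_TIME_RECOVERY_ENABLED') return ageMs <= hourMs;
  const withinSevenDays = ageMs <= 7 * 24 * hourMs;
  // Beyond one hour only whole-minute snapshots are retained (assumption).
  const onMinuteSnapshot = ageMs <= hourMs || readTime.getSeconds() === 0;
  return withinSevenDays && onMinuteSnapshot;
}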

  // The type of App Engine integration mode.
  enum AppEngineIntegrationMode {
    // Not used.
    APP_ENGINE_INTEGRATION_MODE_UNSPECIFIED = 0;

    // If an App Engine application exists in the same region as this database,
    // App Engine configuration will impact this database. This includes
    // disabling of the application & database, as well as disabling writes to
    // the database.
    ENABLED = 1;

    // App Engine has no effect on the ability of this database to serve
    // requests.
    //
    // This is the default setting for databases created with the Firestore API.
    DISABLED = 2;
  }

  // The delete protection state of the database.
  enum DeleteProtectionState {
    // The default value. Delete protection type is not specified.
    DELETE_PROTECTION_STATE_UNSPECIFIED = 0;

    // Delete protection is disabled.
    DELETE_PROTECTION_DISABLED = 1;

    // Delete protection is enabled.
    DELETE_PROTECTION_ENABLED = 2;
  }

  // The CMEK (Customer Managed Encryption Key) configuration for a Firestore
  // database. If not present, the database is secured by the default Google
  // encryption key.
  message CmekConfig {
    // Required. Only keys in the same location as this database are allowed to
    // be used for encryption.
    //
    // For Firestore's nam5 multi-region, this corresponds to Cloud KMS
    // multi-region us. For Firestore's eur3 multi-region, this corresponds to
    // Cloud KMS multi-region europe. See
    // https://cloud.google.com/kms/docs/locations.
    //
    // The expected format is
    // `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
    string kms_key_name = 1 [(google.api.field_behavior) = REQUIRED];

    // Output only. Currently in-use [KMS key
    // versions](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions).
    // During [key rotation](https://cloud.google.com/kms/docs/key-rotation),
    // there can be multiple in-use key versions.
    //
    // The expected format is
    // `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{key_version}`.
    repeated string active_key_version = 2
        [(google.api.field_behavior) = OUTPUT_ONLY];
  }
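The kms_key_name format documented above is a plain resource path. A small sketch (TypeScript) that builds it; all values are placeholders, and the key must live in the Cloud KMS location that matches the database (e.g. multi-region "us" for Firestore's nam5):

// Sketch: build a kms_key_name in the format documented above.
function kmsKeyName(projectId: string, kmsLocation: string, keyRing: string, cryptoKey: string): string {
  return `projects/${projectId}/locations/${kmsLocation}/keyRings/${keyRing}/cryptoKeys/${cryptoKey}`;
}

const keyName = kmsKeyName('my-project', 'us', 'firestore-ring', 'firestore-key');
// -> projects/my-project/locations/us/keyRings/firestore-ring/cryptoKeys/firestore-key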

  // Information about the provenance of this database.
  message SourceInfo {
    // Information about a backup that was used to restore a database.
    message BackupSource {
      // The resource name of the backup that was used to restore this
      // database. Format:
      // `projects/{project}/locations/{location}/backups/{backup}`.
      string backup = 1 [(google.api.resource_reference) = {
        type: "firestore.googleapis.com/Backup"
      }];
    }

    // The source from which this database is derived.
    oneof source {
      // If set, this database was restored from the specified backup (or a
      // snapshot thereof).
      BackupSource backup = 1;
    }

    // The associated long-running operation. This field may not be set after
    // the operation has completed. Format:
    // `projects/{project}/databases/{database}/operations/{operation}`.
    string operation = 3 [(google.api.resource_reference) = {
      type: "firestore.googleapis.com/Operation"
    }];
  }

  // Encryption configuration for a new database being created from another
  // source.
  //
  // The source could be a [Backup][google.firestore.admin.v1.Backup].
  message EncryptionConfig {
    // The configuration options for using Google default encryption.
    message GoogleDefaultEncryptionOptions {}

    // The configuration options for using the same encryption method as the
    // source.
    message SourceEncryptionOptions {}

    // The configuration options for using CMEK (Customer Managed Encryption
    // Key) encryption.
    message CustomerManagedEncryptionOptions {
      // Required. Only keys in the same location as the database are allowed to
      // be used for encryption.
      //
      // For Firestore's nam5 multi-region, this corresponds to Cloud KMS
      // multi-region us. For Firestore's eur3 multi-region, this corresponds to
      // Cloud KMS multi-region europe. See
      // https://cloud.google.com/kms/docs/locations.
      //
      // The expected format is
      // `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
      string kms_key_name = 1 [(google.api.field_behavior) = REQUIRED];
    }

    // The method for encrypting the database.
    oneof encryption_type {
      // Use Google default encryption.
      GoogleDefaultEncryptionOptions google_default_encryption = 1;

      // The database will use the same encryption configuration as the source.
      SourceEncryptionOptions use_source_encryption = 2;

      // Use Customer Managed Encryption Keys (CMEK) for encryption.
      CustomerManagedEncryptionOptions customer_managed_encryption = 3;
    }
  }

  // The resource name of the Database.
  // Format: `projects/{project}/databases/{database}`
  string name = 1;

  // Output only. The system-generated UUID4 for this Database.
  string uid = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The timestamp at which this database was created. Databases
  // created before 2016 do not populate create_time.
  google.protobuf.Timestamp create_time = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The timestamp at which this database was most recently
  // updated. Note this only includes updates to the database resource and not
  // data contained by the database.
  google.protobuf.Timestamp update_time = 6
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The timestamp at which this database was deleted. Only set if
  // the database has been deleted.
  google.protobuf.Timestamp delete_time = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The location of the database. Available locations are listed at
  // https://cloud.google.com/firestore/docs/locations.
  string location_id = 9;

  // The type of the database.
  // See https://cloud.google.com/datastore/docs/firestore-or-datastore for
  // information about how to choose.
  DatabaseType type = 10;

  // The concurrency control mode to use for this database.
  ConcurrencyMode concurrency_mode = 15;

  // Output only. The period during which past versions of data are retained in
  // the database.
  //
  // Any [read][google.firestore.v1.GetDocumentRequest.read_time]
  // or [query][google.firestore.v1.ListDocumentsRequest.read_time] can specify
  // a `read_time` within this window, and will read the state of the database
  // at that time.
  //
  // If the PITR feature is enabled, the retention period is 7 days. Otherwise,
  // the retention period is 1 hour.
  google.protobuf.Duration version_retention_period = 17
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The earliest timestamp at which older versions of the data can
  // be read from the database. See [version_retention_period] above; this field
  // is populated with `now - version_retention_period`.
  //
  // This value is continuously updated, and becomes stale the moment it is
  // queried. If you are using this value to recover data, make sure to account
  // for the time from the moment when the value is queried to the moment when
  // you initiate the recovery.
  google.protobuf.Timestamp earliest_version_time = 18
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Whether to enable the PITR feature on this database.
  PointInTimeRecoveryEnablement point_in_time_recovery_enablement = 21;

  // The App Engine integration mode to use for this database.
  AppEngineIntegrationMode app_engine_integration_mode = 19;

  // Output only. The key_prefix for this database. This key_prefix is used, in
  // combination with the project ID ("<key prefix>~<project id>"), to construct
  // the application ID that is returned from the Cloud Datastore APIs in Google
  // App Engine first generation runtimes.
  //
  // This value may be empty, in which case the app ID to use for URL-encoded
  // keys is the project_id (e.g. foo instead of v~foo).
  string key_prefix = 20 [(google.api.field_behavior) = OUTPUT_ONLY];

  // State of delete protection for the database.
  DeleteProtectionState delete_protection_state = 22;

  // Optional. Presence indicates CMEK is enabled for this database.
  CmekConfig cmek_config = 23 [(google.api.field_behavior) = OPTIONAL];

  // Output only. The database resource's prior database ID. This field is only
  // populated for deleted databases.
  string previous_id = 25 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Information about the provenance of this database.
  SourceInfo source_info = 26 [(google.api.field_behavior) = OUTPUT_ONLY];

  // This checksum is computed by the server based on the value of other
  // fields, and may be sent on update and delete requests to ensure the
  // client has an up-to-date value before proceeding.
  string etag = 99;
}
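Because earliest_version_time is populated as now - version_retention_period and goes stale as soon as it is read, a client that plans a point-in-time read or recovery should add a margin. A minimal sketch (TypeScript), assuming the generated client surfaces the field in camelCase as a protobuf Timestamp-like object; the 60-second margin is an arbitrary illustrative choice:

// Sketch: pick a conservative read timestamp from the Database fields above.
interface DatabaseSnapshotInfo {
  earliestVersionTime?: {seconds?: number | string | null} | null;
}

function safeRecoveryTime(db: DatabaseSnapshotInfo, marginSeconds = 60): Date | undefined {
  const seconds = Number(db.earliestVersionTime?.seconds ?? NaN);
  if (Number.isNaN(seconds)) return undefined;
  // earliestVersionTime is roughly now - version_retention_period and becomes
  // stale the moment it is queried, so shift it forward by a margin.
  return new Date((seconds + marginSeconds) * 1000);
}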
137 server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/field.proto generated vendored Normal file
@@ -0,0 +1,137 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.admin.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/firestore/admin/v1/index.proto";

option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
option java_multiple_files = true;
option java_outer_classname = "FieldProto";
option java_package = "com.google.firestore.admin.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
option ruby_package = "Google::Cloud::Firestore::Admin::V1";

// Represents a single field in the database.
//
// Fields are grouped by their "Collection Group", which represents all
// collections in the database with the same ID.
message Field {
  option (google.api.resource) = {
    type: "firestore.googleapis.com/Field"
    pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}"
  };

  // The index configuration for this field.
  message IndexConfig {
    // The indexes supported for this field.
    repeated Index indexes = 1;

    // Output only. When true, the `Field`'s index configuration is set from the
    // configuration specified by the `ancestor_field`.
    // When false, the `Field`'s index configuration is defined explicitly.
    bool uses_ancestor_config = 2;

    // Output only. Specifies the resource name of the `Field` from which this
    // field's index configuration is set (when `uses_ancestor_config` is true),
    // or from which it *would* be set if this field had no index configuration
    // (when `uses_ancestor_config` is false).
    string ancestor_field = 3;

    // Output only. When true, the `Field`'s index configuration is in the
    // process of being reverted. Once complete, the index config will
    // transition to the same state as the field specified by `ancestor_field`,
    // at which point `uses_ancestor_config` will be `true` and `reverting`
    // will be `false`.
    bool reverting = 4;
  }

  // The TTL (time-to-live) configuration for documents that have this `Field`
  // set.
  //
  // Storing a timestamp value into a TTL-enabled field will be treated as
  // the document's absolute expiration time. Timestamp values in the past
  // indicate that the document is eligible for immediate expiration. Using any
  // other data type or leaving the field absent will disable expiration for the
  // individual document.
  message TtlConfig {
    // The state of applying the TTL configuration to all documents.
    enum State {
      // The state is unspecified or unknown.
      STATE_UNSPECIFIED = 0;

      // The TTL is being applied. There is an active long-running operation to
      // track the change. Newly written documents will have TTLs applied as
      // requested. Requested TTLs on existing documents are still being
      // processed. When TTLs on all existing documents have been processed, the
      // state will move to 'ACTIVE'.
      CREATING = 1;

      // The TTL is active for all documents.
      ACTIVE = 2;

      // The TTL configuration could not be enabled for all existing documents.
      // Newly written documents will continue to have their TTL applied.
      // The LRO returned when last attempting to enable TTL for this `Field`
      // has failed, and may have more details.
      NEEDS_REPAIR = 3;
    }

    // Output only. The state of the TTL configuration.
    State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  }
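Once TTL is enabled on a field, expiration is driven purely by the timestamp a client writes into that field. A minimal sketch (TypeScript) using the regular Firestore client from this package; the field name "expireAt" and the document path are assumptions, and TTL must already be configured on that field via the admin API:

// Sketch: set an absolute expiration by writing a timestamp into a TTL-enabled
// field ("expireAt" is an assumed field name).
import {Firestore, Timestamp} from '@google-cloud/firestore';

async function expireInOneDay(docPath: string): Promise<void> {
  const firestore = new Firestore();
  const expireAt = Timestamp.fromMillis(Date.now() + 24 * 60 * 60 * 1000);
  await firestore.doc(docPath).set({expireAt}, {merge: true});
}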

  // Required. A field name of the form:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}`
  //
  // A field path can be a simple field name, e.g. `address`, or a path to fields
  // within `map_value`, e.g. `address.city`,
  // or a special field path. The only valid special field is `*`, which
  // represents any field.
  //
  // Field paths can be quoted using `` ` `` (backtick). The only character that
  // must be escaped within a quoted field path is the backtick character
  // itself, escaped using a backslash. Special characters in field paths that
  // must be quoted include: `*`, `.`,
  // `` ` `` (backtick), `[`, `]`, as well as any ascii symbolic characters.
  //
  // Examples:
  // `` `address.city` `` represents a field named `address.city`, not the map
  // key `city` in the field `address`. `` `*` `` represents a field named `*`,
  // not any field.
  //
  // A special `Field` contains the default indexing settings for all fields.
  // This field's resource name is:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`
  // Indexes defined on this `Field` will be applied to all fields which do not
  // have their own `Field` index configuration.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // The index configuration for this field. If unset, field indexing will
  // revert to the configuration defined by the `ancestor_field`. To
  // explicitly remove all indexes for this field, specify an index config
  // with an empty list of indexes.
  IndexConfig index_config = 2;

  // The TTL configuration for this `Field`.
  // Setting or unsetting this will enable or disable the TTL for
  // documents that have this `Field`.
  TtlConfig ttl_config = 3;
}
958 server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/firestore_admin.proto generated vendored Normal file
@@ -0,0 +1,958 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.admin.v1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/firestore/admin/v1/backup.proto";
import "google/firestore/admin/v1/database.proto";
import "google/firestore/admin/v1/field.proto";
import "google/firestore/admin/v1/index.proto";
import "google/firestore/admin/v1/operation.proto";
import "google/firestore/admin/v1/schedule.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
option java_multiple_files = true;
option java_outer_classname = "FirestoreAdminProto";
option java_package = "com.google.firestore.admin.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
option ruby_package = "Google::Cloud::Firestore::Admin::V1";
option (google.api.resource_definition) = {
  type: "firestore.googleapis.com/Location"
  pattern: "projects/{project}/locations/{location}"
};
option (google.api.resource_definition) = {
  type: "firestore.googleapis.com/CollectionGroup"
  pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}"
};

// The Cloud Firestore Admin API.
//
// This API provides several administrative services for Cloud Firestore.
//
// Project, Database, Namespace, Collection, Collection Group, and Document are
// used as defined in the Google Cloud Firestore API.
//
// Operation: An Operation represents work being performed in the background.
//
// The index service manages Cloud Firestore indexes.
//
// Index creation is performed asynchronously.
// An Operation resource is created for each such asynchronous operation.
// The state of the operation (including any errors encountered)
// may be queried via the Operation resource.
//
// The Operations collection provides a record of actions performed for the
// specified Project (including any Operations in progress). Operations are not
// created directly but through calls on other collections or resources.
//
// An Operation that is done may be deleted so that it is no longer listed as
// part of the Operation collection. Operations are garbage collected after
// 30 days. By default, ListOperations will only return in progress and failed
// operations. To list completed operations, issue a ListOperations request
// with the filter `done: true`.
//
// Operations are created by service `FirestoreAdmin`, but are accessed via
// service `google.longrunning.Operations`.
service FirestoreAdmin {
  option (google.api.default_host) = "firestore.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/datastore";

  // Creates a composite index. This returns a
  // [google.longrunning.Operation][google.longrunning.Operation] which may be
  // used to track the status of the creation. The metadata for the operation
  // will be the type
  // [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata].
  rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes"
      body: "index"
    };
    option (google.api.method_signature) = "parent,index";
    option (google.longrunning.operation_info) = {
      response_type: "Index"
      metadata_type: "IndexOperationMetadata"
    };
  }

  // Lists composite indexes.
  rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets a composite index.
  rpc GetIndex(GetIndexRequest) returns (Index) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Deletes a composite index.
  rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Gets the metadata and configuration for a Field.
  rpc GetField(GetFieldRequest) returns (Field) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Updates a field configuration. Currently, field updates apply only to
  // single field index configuration. However, calls to
  // [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]
  // should provide a field mask to avoid changing any configuration that the
  // caller isn't aware of. The field mask should be specified as: `{ paths:
  // "index_config" }`.
  //
  // This call returns a
  // [google.longrunning.Operation][google.longrunning.Operation] which may be
  // used to track the status of the field update. The metadata for the
  // operation will be the type
  // [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata].
  //
  // To configure the default field settings for the database, use
  // the special `Field` with resource name:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`.
  rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}"
      body: "field"
    };
    option (google.api.method_signature) = "field";
    option (google.longrunning.operation_info) = {
      response_type: "Field"
      metadata_type: "FieldOperationMetadata"
    };
  }
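A minimal usage sketch (TypeScript) for the UpdateField call above, using the field mask the comment recommends; it assumes the generated client exposes the RPC as updateField with camelCase message fields, and the field resource name is a placeholder:

// Sketch: clear a field's index configuration with { paths: "index_config" }.
import {v1} from '@google-cloud/firestore';

async function clearFieldIndexes(fieldName: string): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const [operation] = await admin.updateField({
    field: {name: fieldName, indexConfig: {indexes: []}},
    updateMask: {paths: ['index_config']},
  });
  await operation.promise(); // wait for the long-running operation to finish
}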

  // Lists the field configuration and metadata for this database.
  //
  // Currently,
  // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
  // only supports listing fields that have been explicitly overridden. To issue
  // this query, call
  // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
  // with the filter set to `indexConfig.usesAncestorConfig:false` or
  // `ttlConfig:*`.
  rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields"
    };
    option (google.api.method_signature) = "parent";
  }

  // Exports a copy of all or a subset of documents from Google Cloud Firestore
  // to another storage system, such as Google Cloud Storage. Recent updates to
  // documents may not be reflected in the export. The export occurs in the
  // background and its progress can be monitored and managed via the
  // Operation resource that is created. The output of an export may only be
  // used once the associated operation is done. If an export operation is
  // cancelled before completion it may leave partial data behind in Google
  // Cloud Storage.
  //
  // For more details on export behavior and output format, refer to:
  // https://cloud.google.com/firestore/docs/manage-data/export-import
  rpc ExportDocuments(ExportDocumentsRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/databases/*}:exportDocuments"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "ExportDocumentsResponse"
      metadata_type: "ExportDocumentsMetadata"
    };
  }
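A usage sketch (TypeScript) for the export call above. The database name follows the http binding shown; the outputUriPrefix field is an assumption here, since the ExportDocumentsRequest message is not shown in this excerpt, and the generated camelCase method name is likewise assumed:

// Sketch: start a document export and wait for the returned operation.
import {v1} from '@google-cloud/firestore';

async function exportAll(project: string, bucket: string): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const [operation] = await admin.exportDocuments({
    name: `projects/${project}/databases/(default)`,
    outputUriPrefix: `gs://${bucket}/firestore-exports`, // assumed field
  });
  const [response] = await operation.promise();
  console.log('export finished, output at', response.outputUriPrefix);
}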

  // Imports documents into Google Cloud Firestore. Existing documents with the
  // same name are overwritten. The import occurs in the background and its
  // progress can be monitored and managed via the Operation resource that is
  // created. If an ImportDocuments operation is cancelled, it is possible
  // that a subset of the data has already been imported to Cloud Firestore.
  rpc ImportDocuments(ImportDocumentsRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/databases/*}:importDocuments"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "ImportDocumentsMetadata"
    };
  }

  // Bulk deletes a subset of documents from Google Cloud Firestore.
  // Documents created or updated after the underlying system starts to process
  // the request will not be deleted. The bulk delete occurs in the background
  // and its progress can be monitored and managed via the Operation resource
  // that is created.
  //
  // For more details on bulk delete behavior, refer to:
  // https://cloud.google.com/firestore/docs/manage-data/bulk-delete
  rpc BulkDeleteDocuments(BulkDeleteDocumentsRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/databases/*}:bulkDeleteDocuments"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "BulkDeleteDocumentsResponse"
      metadata_type: "BulkDeleteDocumentsMetadata"
    };
  }

  // Create a database.
  rpc CreateDatabase(CreateDatabaseRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*}/databases"
      body: "database"
    };
    option (google.api.method_signature) = "parent,database,database_id";
    option (google.longrunning.operation_info) = {
      response_type: "Database"
      metadata_type: "CreateDatabaseMetadata"
    };
  }

  // Gets information about a database.
  rpc GetDatabase(GetDatabaseRequest) returns (Database) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/databases/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // List all the databases in the project.
  rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*}/databases"
    };
    option (google.api.method_signature) = "parent";
  }

  // Updates a database.
  rpc UpdateDatabase(UpdateDatabaseRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{database.name=projects/*/databases/*}"
      body: "database"
    };
    option (google.api.method_signature) = "database,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Database"
      metadata_type: "UpdateDatabaseMetadata"
    };
  }

  // Deletes a database.
  rpc DeleteDatabase(DeleteDatabaseRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/databases/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "Database"
      metadata_type: "DeleteDatabaseMetadata"
    };
  }

  // Gets information about a backup.
  rpc GetBackup(GetBackupRequest) returns (Backup) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/backups/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Lists all the backups.
  rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*}/backups"
    };
    option (google.api.method_signature) = "parent";
  }

  // Deletes a backup.
  rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/backups/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new database by restoring from an existing backup.
  //
  // The new database must be in the same cloud region or multi-region location
  // as the existing backup. This behaves similarly to
  // [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase]
  // except instead of creating a new empty database, a new database is created
  // with the database type, index configuration, and documents from an existing
  // backup.
  //
  // The [long-running operation][google.longrunning.Operation] can be used to
  // track the progress of the restore, with the Operation's
  // [metadata][google.longrunning.Operation.metadata] field type being the
  // [RestoreDatabaseMetadata][google.firestore.admin.v1.RestoreDatabaseMetadata].
  // The [response][google.longrunning.Operation.response] type is the
  // [Database][google.firestore.admin.v1.Database] if the restore was
  // successful. The new database is not readable or writeable until the LRO has
  // completed.
  rpc RestoreDatabase(RestoreDatabaseRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*}/databases:restore"
      body: "*"
    };
    option (google.longrunning.operation_info) = {
      response_type: "Database"
      metadata_type: "RestoreDatabaseMetadata"
    };
  }

  // Creates a backup schedule on a database.
  // At most two backup schedules can be configured on a database, one daily
  // backup schedule and one weekly backup schedule.
  rpc CreateBackupSchedule(CreateBackupScheduleRequest)
      returns (BackupSchedule) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/databases/*}/backupSchedules"
      body: "backup_schedule"
    };
    option (google.api.method_signature) = "parent,backup_schedule";
  }

  // Gets information about a backup schedule.
  rpc GetBackupSchedule(GetBackupScheduleRequest) returns (BackupSchedule) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/databases/*/backupSchedules/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // List backup schedules.
  rpc ListBackupSchedules(ListBackupSchedulesRequest)
      returns (ListBackupSchedulesResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/databases/*}/backupSchedules"
    };
    option (google.api.method_signature) = "parent";
  }

  // Updates a backup schedule.
  rpc UpdateBackupSchedule(UpdateBackupScheduleRequest)
      returns (BackupSchedule) {
    option (google.api.http) = {
      patch: "/v1/{backup_schedule.name=projects/*/databases/*/backupSchedules/*}"
      body: "backup_schedule"
    };
    option (google.api.method_signature) = "backup_schedule,update_mask";
  }

  // Deletes a backup schedule.
  rpc DeleteBackupSchedule(DeleteBackupScheduleRequest)
      returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/databases/*/backupSchedules/*}"
    };
    option (google.api.method_signature) = "name";
  }
}
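The service above is what the @google-cloud/firestore package wraps in its generated admin client. A minimal sketch (TypeScript) that instantiates it and calls the ListDatabases RPC declared above; the camelCase method name follows the usual generated-client convention and should be treated as an assumption:

// Sketch: enumerate the project's databases via the generated admin client.
import {v1} from '@google-cloud/firestore';

async function listProjectDatabases(project: string): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const [response] = await admin.listDatabases({parent: `projects/${project}`});
  for (const db of response.databases ?? []) {
    console.log(db.name, db.type, db.locationId);
  }
}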

// A request to list the Firestore Databases in all locations for a project.
message ListDatabasesRequest {
  // Required. A parent name of the form
  // `projects/{project_id}`
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "firestore.googleapis.com/Database"
    }
  ];

  // If true, also returns deleted resources.
  bool show_deleted = 4;
}

// The request for
// [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase].
message CreateDatabaseRequest {
  // Required. A parent name of the form
  // `projects/{project_id}`
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "firestore.googleapis.com/Database"
    }
  ];

  // Required. The Database to create.
  Database database = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The ID to use for the database, which will become the final
  // component of the database's resource name.
  //
  // This value should be 4-63 characters. Valid characters are /[a-z][0-9]-/
  // with first character a letter and the last a letter or a number. Must not
  // be UUID-like /[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}/.
  //
  // "(default)" database ID is also valid.
  string database_id = 3 [(google.api.field_behavior) = REQUIRED];
}
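The database_id rules above translate into a simple pre-flight check. A sketch (TypeScript) that mirrors the documented constraints; the server performs the authoritative validation:

// Sketch: validate a database_id against the rules stated above.
function isValidDatabaseId(id: string): boolean {
  if (id === '(default)') return true;
  const shape = /^[a-z][a-z0-9-]{2,61}[a-z0-9]$/; // 4-63 chars, letter first, letter/digit last
  const uuidLike = /^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$/;
  return shape.test(id) && !uuidLike.test(id);
}

console.log(isValidDatabaseId('my-db-01'));  // true
console.log(isValidDatabaseId('(default)')); // true
console.log(isValidDatabaseId('1bad'));      // false: must start with a letter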
|
||||
|
||||
// Metadata related to the create database operation.
|
||||
message CreateDatabaseMetadata {}
|
||||
|
||||
// The list of databases for a project.
|
||||
message ListDatabasesResponse {
|
||||
// The databases in the project.
|
||||
repeated Database databases = 1;
|
||||
|
||||
// In the event that data about individual databases cannot be listed they
|
||||
// will be recorded here.
|
||||
//
|
||||
// An example entry might be: projects/some_project/locations/some_location
|
||||
// This can happen if the Cloud Region that the Database resides in is
|
||||
// currently unavailable. In this case we can't fetch all the details about
|
||||
// the database. You may be able to get a more detailed error message
|
||||
// (or possibly fetch the resource) by sending a 'Get' request for the
|
||||
// resource or a 'List' request for the specific location.
|
||||
repeated string unreachable = 3;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.GetDatabase][google.firestore.admin.v1.FirestoreAdmin.GetDatabase].
|
||||
message GetDatabaseRequest {
|
||||
// Required. A name of the form
|
||||
// `projects/{project_id}/databases/{database_id}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.UpdateDatabase][google.firestore.admin.v1.FirestoreAdmin.UpdateDatabase].
|
||||
message UpdateDatabaseRequest {
|
||||
// Required. The database to update.
|
||||
Database database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The list of fields to be updated.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
// Metadata related to the update database operation.
|
||||
message UpdateDatabaseMetadata {}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.DeleteDatabase][google.firestore.admin.v1.FirestoreAdmin.DeleteDatabase].
|
||||
message DeleteDatabaseRequest {
|
||||
// Required. A name of the form
|
||||
// `projects/{project_id}/databases/{database_id}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// The current etag of the Database.
|
||||
// If an etag is provided and does not match the current etag of the database,
|
||||
// deletion will be blocked and a FAILED_PRECONDITION error will be returned.
|
||||
string etag = 3;
|
||||
}
|
||||
|
||||
// Metadata related to the delete database operation.
|
||||
message DeleteDatabaseMetadata {}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.CreateBackupSchedule][google.firestore.admin.v1.FirestoreAdmin.CreateBackupSchedule].
|
||||
message CreateBackupScheduleRequest {
|
||||
// Required. The parent database.
|
||||
//
|
||||
// Format `projects/{project}/databases/{database}`
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// Required. The backup schedule to create.
|
||||
BackupSchedule backup_schedule = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.GetBackupSchedule][google.firestore.admin.v1.FirestoreAdmin.GetBackupSchedule].
|
||||
message GetBackupScheduleRequest {
|
||||
// Required. The name of the backup schedule.
|
||||
//
|
||||
// Format
|
||||
// `projects/{project}/databases/{database}/backupSchedules/{backup_schedule}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/BackupSchedule"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.UpdateBackupSchedule][google.firestore.admin.v1.FirestoreAdmin.UpdateBackupSchedule].
|
||||
message UpdateBackupScheduleRequest {
|
||||
// Required. The backup schedule to update.
|
||||
BackupSchedule backup_schedule = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The list of fields to be updated.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ListBackupSchedules][google.firestore.admin.v1.FirestoreAdmin.ListBackupSchedules].
|
||||
message ListBackupSchedulesRequest {
|
||||
// Required. The parent database.
|
||||
//
|
||||
// Format is `projects/{project}/databases/{database}`.
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The response for
|
||||
// [FirestoreAdmin.ListBackupSchedules][google.firestore.admin.v1.FirestoreAdmin.ListBackupSchedules].
|
||||
message ListBackupSchedulesResponse {
|
||||
// List of all backup schedules.
|
||||
repeated BackupSchedule backup_schedules = 1;
|
||||
}
|
||||
|
||||
// The request for
// [FirestoreAdmin.DeleteBackupSchedule][google.firestore.admin.v1.FirestoreAdmin.DeleteBackupSchedule].
|
||||
message DeleteBackupScheduleRequest {
|
||||
// Required. The name of the backup schedule.
|
||||
//
|
||||
// Format
|
||||
// `projects/{project}/databases/{database}/backupSchedules/{backup_schedule}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/BackupSchedule"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex].
|
||||
message CreateIndexRequest {
|
||||
// Required. A parent name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/CollectionGroup"
|
||||
}
|
||||
];
|
||||
|
||||
// Required. The composite index to create.
|
||||
Index index = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes].
|
||||
message ListIndexesRequest {
|
||||
// Required. A parent name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/CollectionGroup"
|
||||
}
|
||||
];
|
||||
|
||||
// The filter to apply to list results.
|
||||
string filter = 2;
|
||||
|
||||
// The number of results to return.
|
||||
int32 page_size = 3;
|
||||
|
||||
// A page token, returned from a previous call to
|
||||
// [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes],
|
||||
// that may be used to get the next page of results.
|
||||
string page_token = 4;
|
||||
}
|
||||
|
||||
// The response for
|
||||
// [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes].
|
||||
message ListIndexesResponse {
|
||||
// The requested indexes.
|
||||
repeated Index indexes = 1;
|
||||
|
||||
// A page token that may be used to request another page of results. If blank,
|
||||
// this is the last page.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex].
|
||||
message GetIndexRequest {
|
||||
// Required. A name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = { type: "firestore.googleapis.com/Index" }
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex].
|
||||
message DeleteIndexRequest {
|
||||
// Required. A name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = { type: "firestore.googleapis.com/Index" }
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField].
|
||||
message UpdateFieldRequest {
|
||||
// Required. The field to be updated.
|
||||
Field field = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// A mask, relative to the field. If specified, only configuration specified
|
||||
// by this field_mask will be updated in the field.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField].
|
||||
message GetFieldRequest {
|
||||
// Required. A name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}`
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = { type: "firestore.googleapis.com/Field" }
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields].
|
||||
message ListFieldsRequest {
|
||||
// Required. A parent name of the form
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/CollectionGroup"
|
||||
}
|
||||
];
|
||||
|
||||
// The filter to apply to list results. Currently,
|
||||
// [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
|
||||
// only supports listing fields that have been explicitly overridden. To issue
|
||||
// this query, call
|
||||
// [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
|
||||
// with a filter that includes `indexConfig.usesAncestorConfig:false` or
|
||||
// `ttlConfig:*`.
|
||||
string filter = 2;
|
||||
|
||||
// The number of results to return.
|
||||
int32 page_size = 3;
|
||||
|
||||
// A page token, returned from a previous call to
|
||||
// [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields],
|
||||
// that may be used to get the next page of results.
|
||||
string page_token = 4;
|
||||
}
|
||||
|
||||
// The response for
|
||||
// [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields].
|
||||
message ListFieldsResponse {
|
||||
// The requested fields.
|
||||
repeated Field fields = 1;
|
||||
|
||||
// A page token that may be used to request another page of results. If blank,
|
||||
// this is the last page.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments].
|
||||
message ExportDocumentsRequest {
|
||||
// Required. Database to export. Should be of the form:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// Which collection IDs to export. Unspecified means all collections. Each
|
||||
// collection ID in this list must be unique.
|
||||
repeated string collection_ids = 2;
|
||||
|
||||
// The output URI. Currently only supports Google Cloud Storage URIs of the
|
||||
// form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name
|
||||
// of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional
|
||||
// Google Cloud Storage namespace path. When
|
||||
// choosing a name, be sure to consider Google Cloud Storage naming
|
||||
// guidelines: https://cloud.google.com/storage/docs/naming.
|
||||
// If the URI is a bucket (without a namespace path), a prefix will be
|
||||
// generated based on the start time.
|
||||
string output_uri_prefix = 3;
|
||||
|
||||
// An empty list represents all namespaces. This is the preferred
|
||||
// usage for databases that don't use namespaces.
|
||||
//
|
||||
// An empty string element represents the default namespace. This should be
|
||||
// used if the database has data in non-default namespaces, but doesn't want
|
||||
// to include them. Each namespace in this list must be unique.
|
||||
repeated string namespace_ids = 4;
|
||||
|
||||
// The timestamp that corresponds to the version of the database to be
|
||||
// exported. The timestamp must be in the past, rounded to the minute and not
|
||||
// older than
|
||||
// [earliestVersionTime][google.firestore.admin.v1.Database.earliest_version_time].
|
||||
// If specified, then the exported documents will represent a consistent view
|
||||
// of the database at the provided time. Otherwise, there are no guarantees
|
||||
// about the consistency of the exported documents.
|
||||
google.protobuf.Timestamp snapshot_time = 5;
|
||||
}
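A minimal TypeScript sketch of issuing this request, assuming the generated v1 FirestoreAdminClient in this package exposes the ExportDocuments RPC as exportDocuments with camelCase request fields; the project ID and bucket name below are hypothetical:

import { v1 } from '@google-cloud/firestore';

async function exportExample(): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  // `projects/{project_id}/databases/{database_id}`
  const name = admin.databasePath('my-project', '(default)');

  // Returns a long-running operation; ExportDocumentsResponse.output_uri_prefix
  // reports where the files ended up once it completes.
  const [operation] = await admin.exportDocuments({
    name,
    collectionIds: ['users', 'orders'], // empty list = export all collections
    outputUriPrefix: 'gs://my-export-bucket', // bare bucket: a prefix is generated from the start time
  });
  const [response] = await operation.promise();
  console.log('Exported to', response.outputUriPrefix);
}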
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments].
|
||||
message ImportDocumentsRequest {
|
||||
// Required. Database to import into. Should be of the form:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// Which collection IDs to import. Unspecified means all collections included
|
||||
// in the import. Each collection ID in this list must be unique.
|
||||
repeated string collection_ids = 2;
|
||||
|
||||
// Location of the exported files.
|
||||
// This must match the output_uri_prefix of an ExportDocumentsResponse from
|
||||
// an export that has completed successfully.
|
||||
// See:
|
||||
// [google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix].
|
||||
string input_uri_prefix = 3;
|
||||
|
||||
// An empty list represents all namespaces. This is the preferred
|
||||
// usage for databases that don't use namespaces.
|
||||
//
|
||||
// An empty string element represents the default namespace. This should be
|
||||
// used if the database has data in non-default namespaces, but doesn't want
|
||||
// to include them. Each namespace in this list must be unique.
|
||||
repeated string namespace_ids = 4;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.BulkDeleteDocuments][google.firestore.admin.v1.FirestoreAdmin.BulkDeleteDocuments].
|
||||
//
|
||||
// When both collection_ids and namespace_ids are set, only documents satisfying
|
||||
// both conditions will be deleted.
|
||||
//
|
||||
// Requests with namespace_ids and collection_ids both empty will be rejected.
|
||||
// Please use
|
||||
// [FirestoreAdmin.DeleteDatabase][google.firestore.admin.v1.FirestoreAdmin.DeleteDatabase]
|
||||
// instead.
|
||||
message BulkDeleteDocumentsRequest {
|
||||
// Required. Database to operate on. Should be of the form:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// Optional. IDs of the collection groups to delete. Unspecified means all
|
||||
// collection groups.
|
||||
//
|
||||
// Each collection group in this list must be unique.
|
||||
repeated string collection_ids = 2 [(google.api.field_behavior) = OPTIONAL];
|
||||
|
||||
// Optional. Namespaces to delete.
|
||||
//
|
||||
// An empty list means all namespaces. This is the recommended
|
||||
// usage for databases that don't use namespaces.
|
||||
//
|
||||
// An empty string element represents the default namespace. This should be
|
||||
// used if the database has data in non-default namespaces, but doesn't want
|
||||
// to delete from them.
|
||||
//
|
||||
// Each namespace in this list must be unique.
|
||||
repeated string namespace_ids = 3 [(google.api.field_behavior) = OPTIONAL];
|
||||
}
|
||||
|
||||
// The response for
|
||||
// [FirestoreAdmin.BulkDeleteDocuments][google.firestore.admin.v1.FirestoreAdmin.BulkDeleteDocuments].
|
||||
message BulkDeleteDocumentsResponse {}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.GetBackup][google.firestore.admin.v1.FirestoreAdmin.GetBackup].
|
||||
message GetBackupRequest {
|
||||
// Required. Name of the backup to fetch.
|
||||
//
|
||||
// Format is `projects/{project}/locations/{location}/backups/{backup}`.
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Backup"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.ListBackups][google.firestore.admin.v1.FirestoreAdmin.ListBackups].
|
||||
message ListBackupsRequest {
|
||||
// Required. The location to list backups from.
|
||||
//
|
||||
// Format is `projects/{project}/locations/{location}`.
|
||||
// Use `{location} = '-'` to list backups from all locations for the given
|
||||
// project. This allows listing backups from a single location or from all
|
||||
// locations.
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Location"
|
||||
}
|
||||
];
|
||||
|
||||
// An expression that filters the list of returned backups.
|
||||
//
|
||||
// A filter expression consists of a field name, a comparison operator, and a
|
||||
// value for filtering.
|
||||
// The value must be a string, a number, or a boolean. The comparison operator
|
||||
// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
|
||||
// Colon `:` is the contains operator. Filter rules are not case sensitive.
|
||||
//
|
||||
// The following fields in the [Backup][google.firestore.admin.v1.Backup] are
|
||||
// eligible for filtering:
|
||||
//
|
||||
// * `database_uid` (supports `=` only)
|
||||
string filter = 2;
|
||||
}
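A hedged TypeScript sketch of the location wildcard and the `database_uid` filter described above, assuming the generated v1 FirestoreAdminClient exposes ListBackups as listBackups; the project ID and UID value are hypothetical:

import { v1 } from '@google-cloud/firestore';

async function listBackupsExample(): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const [response] = await admin.listBackups({
    // `-` as the location lists backups from every location in the project.
    parent: 'projects/my-project/locations/-',
    // Only `database_uid` is filterable, and only with `=`.
    filter: 'database_uid = "0000-aaaa-1111-bbbb"',
  });
  for (const backup of response.backups ?? []) {
    console.log(backup.name);
  }
}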
|
||||
|
||||
// The response for
|
||||
// [FirestoreAdmin.ListBackups][google.firestore.admin.v1.FirestoreAdmin.ListBackups].
|
||||
message ListBackupsResponse {
|
||||
// List of all backups for the project.
|
||||
repeated Backup backups = 1;
|
||||
|
||||
// List of locations that existing backups were not able to be fetched from.
|
||||
//
|
||||
// Instead of failing the entire request when a single location is
|
||||
// unreachable, this response returns a partial result set and list of
|
||||
// locations unable to be reached here. The request can be retried against a
|
||||
// single location to get a concrete error.
|
||||
repeated string unreachable = 3;
|
||||
}
|
||||
|
||||
// The request for
|
||||
// [FirestoreAdmin.DeleteBackup][google.firestore.admin.v1.FirestoreAdmin.DeleteBackup].
|
||||
message DeleteBackupRequest {
|
||||
// Required. Name of the backup to delete.
|
||||
//
|
||||
// Format is `projects/{project}/locations/{location}/backups/{backup}`.
|
||||
string name = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Backup"
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
// The request message for
|
||||
// [FirestoreAdmin.RestoreDatabase][google.firestore.admin.v1.FirestoreAdmin.RestoreDatabase].
|
||||
message RestoreDatabaseRequest {
|
||||
// Required. The project to restore the database in. Format is
|
||||
// `projects/{project_id}`.
|
||||
string parent = 1 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
child_type: "firestore.googleapis.com/Database"
|
||||
}
|
||||
];
|
||||
|
||||
// Required. The ID to use for the database, which will become the final
|
||||
// component of the database's resource name. This database ID must not be
|
||||
// associated with an existing database.
|
||||
//
|
||||
// This value should be 4-63 characters. Valid characters are /[a-z][0-9]-/
|
||||
// with first character a letter and the last a letter or a number. Must not
|
||||
// be UUID-like /[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}/.
|
||||
//
|
||||
// "(default)" database ID is also valid.
|
||||
string database_id = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. Backup to restore from. Must be from the same project as the
|
||||
// parent.
|
||||
//
|
||||
// The restored database will be created in the same location as the source
|
||||
// backup.
|
||||
//
|
||||
// Format is: `projects/{project_id}/locations/{location}/backups/{backup}`
|
||||
string backup = 3 [
|
||||
(google.api.field_behavior) = REQUIRED,
|
||||
(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Backup"
|
||||
}
|
||||
];
|
||||
|
||||
// Optional. Encryption configuration for the restored database.
|
||||
//
|
||||
// If this field is not specified, the restored database will use
|
||||
// the same encryption configuration as the backup, namely
|
||||
// [use_source_encryption][google.firestore.admin.v1.Database.EncryptionConfig.use_source_encryption].
|
||||
Database.EncryptionConfig encryption_config = 9
|
||||
[(google.api.field_behavior) = OPTIONAL];
|
||||
}
|
||||
198
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/index.proto
generated
vendored
Normal file
198
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/index.proto
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.admin.v1;
|
||||
|
||||
import "google/api/field_behavior.proto";
|
||||
import "google/api/resource.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "IndexProto";
|
||||
option java_package = "com.google.firestore.admin.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::Admin::V1";
|
||||
|
||||
// Cloud Firestore indexes enable simple and complex queries against
|
||||
// documents in a database.
|
||||
message Index {
|
||||
option (google.api.resource) = {
|
||||
type: "firestore.googleapis.com/Index"
|
||||
pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}"
|
||||
};
|
||||
|
||||
// Query Scope defines the scope at which a query is run. This is specified on
|
||||
// a StructuredQuery's `from` field.
|
||||
enum QueryScope {
|
||||
// The query scope is unspecified. Not a valid option.
|
||||
QUERY_SCOPE_UNSPECIFIED = 0;
|
||||
|
||||
// Indexes with a collection query scope specified allow queries
|
||||
// against a collection that is the child of a specific document, specified
|
||||
// at query time, and that has the collection ID specified by the index.
|
||||
COLLECTION = 1;
|
||||
|
||||
// Indexes with a collection group query scope specified allow queries
|
||||
// against all collections that have the collection ID specified by the
|
||||
// index.
|
||||
COLLECTION_GROUP = 2;
|
||||
|
||||
// Include all the collection's ancestors in the index. Only available for
|
||||
// Datastore Mode databases.
|
||||
COLLECTION_RECURSIVE = 3;
|
||||
}
|
||||
|
||||
// API Scope defines the APIs (Firestore Native, or Firestore in
|
||||
// Datastore Mode) that are supported for queries.
|
||||
enum ApiScope {
|
||||
// The index can only be used by the Firestore Native query API.
|
||||
// This is the default.
|
||||
ANY_API = 0;
|
||||
|
||||
// The index can only be used by the Firestore in Datastore Mode query API.
|
||||
DATASTORE_MODE_API = 1;
|
||||
}
|
||||
|
||||
// A field in an index.
|
||||
// The field_path describes which field is indexed, the value_mode describes
|
||||
// how the field value is indexed.
|
||||
message IndexField {
|
||||
// The supported orderings.
|
||||
enum Order {
|
||||
// The ordering is unspecified. Not a valid option.
|
||||
ORDER_UNSPECIFIED = 0;
|
||||
|
||||
// The field is ordered by ascending field value.
|
||||
ASCENDING = 1;
|
||||
|
||||
// The field is ordered by descending field value.
|
||||
DESCENDING = 2;
|
||||
}
|
||||
|
||||
// The supported array value configurations.
|
||||
enum ArrayConfig {
|
||||
// The index does not support additional array queries.
|
||||
ARRAY_CONFIG_UNSPECIFIED = 0;
|
||||
|
||||
// The index supports array containment queries.
|
||||
CONTAINS = 1;
|
||||
}
|
||||
|
||||
// The index configuration to support vector search operations
|
||||
message VectorConfig {
|
||||
// An index that stores vectors in a flat data structure, and supports
|
||||
// exhaustive search.
|
||||
message FlatIndex {}
|
||||
|
||||
// Required. The vector dimension this configuration applies to.
|
||||
//
|
||||
// The resulting index will only include vectors of this dimension, and
|
||||
// can be used for vector search with the same dimension.
|
||||
int32 dimension = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The type of index used.
|
||||
oneof type {
|
||||
// Indicates the vector index is a flat index.
|
||||
FlatIndex flat = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Can be __name__.
|
||||
// For single field indexes, this must match the name of the field or may
|
||||
// be omitted.
|
||||
string field_path = 1;
|
||||
|
||||
// How the field value is indexed.
|
||||
oneof value_mode {
|
||||
// Indicates that this field supports ordering by the specified order or
|
||||
// comparing using =, !=, <, <=, >, >=.
|
||||
Order order = 2;
|
||||
|
||||
// Indicates that this field supports operations on `array_value`s.
|
||||
ArrayConfig array_config = 3;
|
||||
|
||||
// Indicates that this field supports nearest neighbor and distance
|
||||
// operations on vector.
|
||||
VectorConfig vector_config = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// The state of an index. During index creation, an index will be in the
|
||||
// `CREATING` state. If the index is created successfully, it will transition
|
||||
// to the `READY` state. If the index creation encounters a problem, the index
|
||||
// will transition to the `NEEDS_REPAIR` state.
|
||||
enum State {
|
||||
// The state is unspecified.
|
||||
STATE_UNSPECIFIED = 0;
|
||||
|
||||
// The index is being created.
|
||||
// There is an active long-running operation for the index.
|
||||
// The index is updated when writing a document.
|
||||
// Some index data may exist.
|
||||
CREATING = 1;
|
||||
|
||||
// The index is ready to be used.
|
||||
// The index is updated when writing a document.
|
||||
// The index is fully populated from all stored documents it applies to.
|
||||
READY = 2;
|
||||
|
||||
// The index was being created, but something went wrong.
|
||||
// There is no active long-running operation for the index,
|
||||
// and the most recently finished long-running operation failed.
|
||||
// The index is not updated when writing a document.
|
||||
// Some index data may exist.
|
||||
// Use the google.longrunning.Operations API to determine why the operation
|
||||
// that last attempted to create this index failed, then re-create the
|
||||
// index.
|
||||
NEEDS_REPAIR = 3;
|
||||
}
|
||||
|
||||
// Output only. A server defined name for this index.
|
||||
// The form of this name for composite indexes will be:
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}`
|
||||
// For single field indexes, this field will be empty.
|
||||
string name = 1;
|
||||
|
||||
// Indexes with a collection query scope specified allow queries
|
||||
// against a collection that is the child of a specific document, specified at
|
||||
// query time, and that has the same collection ID.
|
||||
//
|
||||
// Indexes with a collection group query scope specified allow queries against
|
||||
// all collections descended from a specific document, specified at query
|
||||
// time, and that have the same collection ID as this index.
|
||||
QueryScope query_scope = 2;
|
||||
|
||||
// The API scope supported by this index.
|
||||
ApiScope api_scope = 5;
|
||||
|
||||
// The fields supported by this index.
|
||||
//
|
||||
// For composite indexes, this requires a minimum of 2 and a maximum of 100
|
||||
// fields. The last field entry is always for the field path `__name__`. If,
|
||||
// on creation, `__name__` was not specified as the last field, it will be
|
||||
// added automatically with the same direction as that of the last field
|
||||
// defined. If the final field in a composite index is not directional, the
|
||||
// `__name__` will be ordered ASCENDING (unless explicitly specified).
|
||||
//
|
||||
// For single field indexes, this will always be exactly one entry with a
|
||||
// field path equal to the field path of the associated field.
|
||||
repeated IndexField fields = 3;
|
||||
|
||||
// Output only. The serving state of the index.
|
||||
State state = 4;
|
||||
}
|
||||
30
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/location.proto
generated
vendored
Normal file
30
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/location.proto
generated
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.admin.v1;

option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
option java_multiple_files = true;
option java_outer_classname = "LocationProto";
option java_package = "com.google.firestore.admin.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
option ruby_package = "Google::Cloud::Firestore::Admin::V1";

// The metadata message for
// [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata].
message LocationMetadata {}
300
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/operation.proto
generated
vendored
Normal file
300
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/operation.proto
generated
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.admin.v1;
|
||||
|
||||
import "google/api/resource.proto";
|
||||
import "google/firestore/admin/v1/index.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "OperationProto";
|
||||
option java_package = "com.google.firestore.admin.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::Admin::V1";
|
||||
|
||||
// Metadata for [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// results from
|
||||
// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex].
|
||||
message IndexOperationMetadata {
|
||||
// The time this operation started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time this operation completed. Will be unset if operation still in
|
||||
// progress.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The index resource that this operation is acting on. For example:
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`
|
||||
string index = 3;
|
||||
|
||||
// The state of the operation.
|
||||
OperationState state = 4;
|
||||
|
||||
// The progress, in documents, of this operation.
|
||||
Progress progress_documents = 5;
|
||||
|
||||
// The progress, in bytes, of this operation.
|
||||
Progress progress_bytes = 6;
|
||||
}
|
||||
|
||||
// Metadata for [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// results from
|
||||
// [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField].
|
||||
message FieldOperationMetadata {
|
||||
// Information about an index configuration change.
|
||||
message IndexConfigDelta {
|
||||
// Specifies how the index is changing.
|
||||
enum ChangeType {
|
||||
// The type of change is not specified or known.
|
||||
CHANGE_TYPE_UNSPECIFIED = 0;
|
||||
|
||||
// The single field index is being added.
|
||||
ADD = 1;
|
||||
|
||||
// The single field index is being removed.
|
||||
REMOVE = 2;
|
||||
}
|
||||
|
||||
// Specifies how the index is changing.
|
||||
ChangeType change_type = 1;
|
||||
|
||||
// The index being changed.
|
||||
Index index = 2;
|
||||
}
|
||||
|
||||
// Information about a TTL configuration change.
|
||||
message TtlConfigDelta {
|
||||
// Specifies how the TTL config is changing.
|
||||
enum ChangeType {
|
||||
// The type of change is not specified or known.
|
||||
CHANGE_TYPE_UNSPECIFIED = 0;
|
||||
|
||||
// The TTL config is being added.
|
||||
ADD = 1;
|
||||
|
||||
// The TTL config is being removed.
|
||||
REMOVE = 2;
|
||||
}
|
||||
|
||||
// Specifies how the TTL configuration is changing.
|
||||
ChangeType change_type = 1;
|
||||
}
|
||||
|
||||
// The time this operation started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time this operation completed. Will be unset if operation still in
|
||||
// progress.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The field resource that this operation is acting on. For example:
|
||||
// `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}`
|
||||
string field = 3;
|
||||
|
||||
// A list of
|
||||
// [IndexConfigDelta][google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta],
|
||||
// which describe the intent of this operation.
|
||||
repeated IndexConfigDelta index_config_deltas = 4;
|
||||
|
||||
// The state of the operation.
|
||||
OperationState state = 5;
|
||||
|
||||
// The progress, in documents, of this operation.
|
||||
Progress progress_documents = 6;
|
||||
|
||||
// The progress, in bytes, of this operation.
|
||||
Progress progress_bytes = 7;
|
||||
|
||||
// Describes the deltas of TTL configuration.
|
||||
TtlConfigDelta ttl_config_delta = 8;
|
||||
}
|
||||
|
||||
// Metadata for [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// results from
|
||||
// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments].
|
||||
message ExportDocumentsMetadata {
|
||||
// The time this operation started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time this operation completed. Will be unset if operation still in
|
||||
// progress.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The state of the export operation.
|
||||
OperationState operation_state = 3;
|
||||
|
||||
// The progress, in documents, of this operation.
|
||||
Progress progress_documents = 4;
|
||||
|
||||
// The progress, in bytes, of this operation.
|
||||
Progress progress_bytes = 5;
|
||||
|
||||
// Which collection IDs are being exported.
|
||||
repeated string collection_ids = 6;
|
||||
|
||||
// Where the documents are being exported to.
|
||||
string output_uri_prefix = 7;
|
||||
|
||||
// Which namespace IDs are being exported.
|
||||
repeated string namespace_ids = 8;
|
||||
|
||||
// The timestamp that corresponds to the version of the database that is being
|
||||
// exported. If unspecified, there are no guarantees about the consistency of
|
||||
// the documents being exported.
|
||||
google.protobuf.Timestamp snapshot_time = 9;
|
||||
}
|
||||
|
||||
// Metadata for [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// results from
|
||||
// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments].
|
||||
message ImportDocumentsMetadata {
|
||||
// The time this operation started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time this operation completed. Will be unset if operation still in
|
||||
// progress.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The state of the import operation.
|
||||
OperationState operation_state = 3;
|
||||
|
||||
// The progress, in documents, of this operation.
|
||||
Progress progress_documents = 4;
|
||||
|
||||
// The progress, in bytes, of this operation.
|
||||
Progress progress_bytes = 5;
|
||||
|
||||
// Which collection IDs are being imported.
|
||||
repeated string collection_ids = 6;
|
||||
|
||||
// The location of the documents being imported.
|
||||
string input_uri_prefix = 7;
|
||||
|
||||
// Which namespace IDs are being imported.
|
||||
repeated string namespace_ids = 8;
|
||||
}
|
||||
|
||||
// Metadata for [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// results from
|
||||
// [FirestoreAdmin.BulkDeleteDocuments][google.firestore.admin.v1.FirestoreAdmin.BulkDeleteDocuments].
|
||||
message BulkDeleteDocumentsMetadata {
|
||||
// The time this operation started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time this operation completed. Will be unset if operation still in
|
||||
// progress.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The state of the operation.
|
||||
OperationState operation_state = 3;
|
||||
|
||||
// The progress, in documents, of this operation.
|
||||
Progress progress_documents = 4;
|
||||
|
||||
// The progress, in bytes, of this operation.
|
||||
Progress progress_bytes = 5;
|
||||
|
||||
// The IDs of the collection groups that are being deleted.
|
||||
repeated string collection_ids = 6;
|
||||
|
||||
// Which namespace IDs are being deleted.
|
||||
repeated string namespace_ids = 7;
|
||||
|
||||
// The timestamp that corresponds to the version of the database that is being
|
||||
// read to get the list of documents to delete. This time can also be used as
|
||||
// the timestamp of PITR in case of disaster recovery (subject to PITR window
|
||||
// limit).
|
||||
google.protobuf.Timestamp snapshot_time = 8;
|
||||
}
|
||||
|
||||
// Returned in the [google.longrunning.Operation][google.longrunning.Operation]
|
||||
// response field.
|
||||
message ExportDocumentsResponse {
|
||||
// Location of the output files. This can be used to begin an import
|
||||
// into Cloud Firestore (this project or another project) after the operation
|
||||
// completes successfully.
|
||||
string output_uri_prefix = 1;
|
||||
}
|
||||
|
||||
// Metadata for the [long-running operation][google.longrunning.Operation] from
|
||||
// the [RestoreDatabase][google.firestore.admin.v1.RestoreDatabase] request.
|
||||
message RestoreDatabaseMetadata {
|
||||
// The time the restore was started.
|
||||
google.protobuf.Timestamp start_time = 1;
|
||||
|
||||
// The time the restore finished, unset for ongoing restores.
|
||||
google.protobuf.Timestamp end_time = 2;
|
||||
|
||||
// The operation state of the restore.
|
||||
OperationState operation_state = 3;
|
||||
|
||||
// The name of the database being restored to.
|
||||
string database = 4 [(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Database"
|
||||
}];
|
||||
|
||||
// The name of the backup restoring from.
|
||||
string backup = 5 [(google.api.resource_reference) = {
|
||||
type: "firestore.googleapis.com/Backup"
|
||||
}];
|
||||
|
||||
// How far along the restore is as an estimated percentage of remaining time.
|
||||
Progress progress_percentage = 8;
|
||||
}
|
||||
|
||||
// Describes the progress of the operation.
|
||||
// Unit of work is generic and must be interpreted based on where
|
||||
// [Progress][google.firestore.admin.v1.Progress] is used.
|
||||
message Progress {
|
||||
// The amount of work estimated.
|
||||
int64 estimated_work = 1;
|
||||
|
||||
// The amount of work completed.
|
||||
int64 completed_work = 2;
|
||||
}
|
||||
|
||||
// Describes the state of the operation.
|
||||
enum OperationState {
|
||||
// Unspecified.
|
||||
OPERATION_STATE_UNSPECIFIED = 0;
|
||||
|
||||
// Request is being prepared for processing.
|
||||
INITIALIZING = 1;
|
||||
|
||||
// Request is actively being processed.
|
||||
PROCESSING = 2;
|
||||
|
||||
// Request is in the process of being cancelled after user called
|
||||
// google.longrunning.Operations.CancelOperation on the operation.
|
||||
CANCELLING = 3;
|
||||
|
||||
// Request has been processed and is in its finalization stage.
|
||||
FINALIZING = 4;
|
||||
|
||||
// Request has completed successfully.
|
||||
SUCCESSFUL = 5;
|
||||
|
||||
// Request has finished being processed, but encountered an error.
|
||||
FAILED = 6;
|
||||
|
||||
// Request has finished being cancelled after user called
|
||||
// google.longrunning.Operations.CancelOperation.
|
||||
CANCELLED = 7;
|
||||
}
|
||||
95
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/schedule.proto
generated
vendored
Normal file
95
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/admin/v1/schedule.proto
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.admin.v1;
|
||||
|
||||
import "google/api/field_behavior.proto";
|
||||
import "google/api/resource.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/type/dayofweek.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/admin/adminpb;adminpb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "ScheduleProto";
|
||||
option java_package = "com.google.firestore.admin.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::Admin::V1";
|
||||
|
||||
// A backup schedule for a Cloud Firestore Database.
|
||||
//
|
||||
// This resource is owned by the database it is backing up, and is deleted along
|
||||
// with the database. The actual backups are not deleted, though.
|
||||
message BackupSchedule {
|
||||
option (google.api.resource) = {
|
||||
type: "firestore.googleapis.com/BackupSchedule"
|
||||
pattern: "projects/{project}/databases/{database}/backupSchedules/{backup_schedule}"
|
||||
};
|
||||
|
||||
// Output only. The unique backup schedule identifier across all locations and
|
||||
// databases for the given project.
|
||||
//
|
||||
// This will be auto-assigned.
|
||||
//
|
||||
// Format is
|
||||
// `projects/{project}/databases/{database}/backupSchedules/{backup_schedule}`
|
||||
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
|
||||
|
||||
// Output only. The timestamp at which this backup schedule was created and
|
||||
// effective since.
|
||||
//
|
||||
// No backups will be created for this schedule before this time.
|
||||
google.protobuf.Timestamp create_time = 3
|
||||
[(google.api.field_behavior) = OUTPUT_ONLY];
|
||||
|
||||
// Output only. The timestamp at which this backup schedule was most recently
|
||||
// updated. When a backup schedule is first created, this is the same as
|
||||
// create_time.
|
||||
google.protobuf.Timestamp update_time = 10
|
||||
[(google.api.field_behavior) = OUTPUT_ONLY];
|
||||
|
||||
// At what relative time in the future, compared to its creation time,
|
||||
// the backup should be deleted, e.g. keep backups for 7 days.
|
||||
//
|
||||
// The maximum supported retention period is 14 weeks.
|
||||
google.protobuf.Duration retention = 6;
|
||||
|
||||
// A oneof field to represent when backups will be taken.
|
||||
oneof recurrence {
|
||||
// For a schedule that runs daily.
|
||||
DailyRecurrence daily_recurrence = 7;
|
||||
|
||||
// For a schedule that runs weekly on a specific day.
|
||||
WeeklyRecurrence weekly_recurrence = 8;
|
||||
}
|
||||
}
|
||||
|
||||
// Represents a recurring schedule that runs every day.
|
||||
//
|
||||
// The time zone is UTC.
|
||||
message DailyRecurrence {}
|
||||
|
||||
// Represents a recurring schedule that runs on a specified day of the week.
|
||||
//
|
||||
// The time zone is UTC.
|
||||
message WeeklyRecurrence {
|
||||
// The day of week to run.
|
||||
//
|
||||
// DAY_OF_WEEK_UNSPECIFIED is not allowed.
|
||||
google.type.DayOfWeek day = 2;
|
||||
}
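A hedged TypeScript sketch tying the retention and recurrence fields together, assuming the generated v1 FirestoreAdminClient exposes CreateBackupSchedule as createBackupSchedule and accepts enum names as strings; the project ID is hypothetical:

import { v1 } from '@google-cloud/firestore';

async function createSchedulesExample(): Promise<void> {
  const admin = new v1.FirestoreAdminClient();
  const parent = admin.databasePath('my-project', '(default)');

  // Daily backups, kept for 7 days (retention is a protobuf Duration in seconds).
  await admin.createBackupSchedule({
    parent,
    backupSchedule: {
      retention: { seconds: 7 * 24 * 60 * 60 },
      dailyRecurrence: {},
    },
  });

  // Weekly backups on Sundays (UTC), kept for the 14-week maximum.
  await admin.createBackupSchedule({
    parent,
    backupSchedule: {
      retention: { seconds: 14 * 7 * 24 * 60 * 60 },
      weeklyRecurrence: { day: 'SUNDAY' },
    },
  });
}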
|
||||
43
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/aggregation_result.proto
generated
vendored
Normal file
43
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/aggregation_result.proto
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/firestore/v1/document.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "AggregationResultProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// The result of a single bucket from a Firestore aggregation query.
|
||||
//
|
||||
// The keys of `aggregate_fields` are the same for all results in an aggregation
|
||||
// query, unlike document queries which can have different fields present for
|
||||
// each result.
|
||||
message AggregationResult {
|
||||
// The result of the aggregation functions, ex: `COUNT(*) AS total_docs`.
|
||||
//
|
||||
// The key is the
|
||||
// [alias][google.firestore.v1.StructuredAggregationQuery.Aggregation.alias]
|
||||
// assigned to the aggregation function on input and the size of this map
|
||||
// equals the number of aggregation functions in the query.
|
||||
map<string, Value> aggregate_fields = 2;
|
||||
}
|
||||
73
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/bloom_filter.proto
generated
vendored
Normal file
73
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/bloom_filter.proto
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "BloomFilterProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A sequence of bits, encoded in a byte array.
//
// Each byte in the `bitmap` byte array stores 8 bits of the sequence. The only
// exception is the last byte, which may store 8 _or fewer_ bits. The `padding`
// defines the number of bits of the last byte to be ignored as "padding". The
// values of these "padding" bits are unspecified and must be ignored.
//
// To retrieve the first bit, bit 0, calculate: `(bitmap[0] & 0x01) != 0`.
// To retrieve the second bit, bit 1, calculate: `(bitmap[0] & 0x02) != 0`.
// To retrieve the third bit, bit 2, calculate: `(bitmap[0] & 0x04) != 0`.
// To retrieve the fourth bit, bit 3, calculate: `(bitmap[0] & 0x08) != 0`.
// To retrieve bit n, calculate: `(bitmap[n / 8] & (0x01 << (n % 8))) != 0`.
//
// The "size" of a `BitSequence` (the number of bits it contains) is calculated
// by this formula: `(bitmap.length * 8) - padding`.
message BitSequence {
// The bytes that encode the bit sequence.
// May have a length of zero.
bytes bitmap = 1;

// The number of bits of the last byte in `bitmap` to ignore as "padding".
// If the length of `bitmap` is zero, then this value must be `0`.
// Otherwise, this value must be between 0 and 7, inclusive.
int32 padding = 2;
}

// A bloom filter (https://en.wikipedia.org/wiki/Bloom_filter).
//
// The bloom filter hashes the entries with MD5 and treats the resulting 128-bit
// hash as 2 distinct 64-bit hash values, interpreted as unsigned integers
// using 2's complement encoding.
//
// These two hash values, named `h1` and `h2`, are then used to compute the
// `hash_count` hash values using the formula, starting at `i=0`:
//
// h(i) = h1 + (i * h2)
//
// These resulting values are then taken modulo the number of bits in the bloom
// filter to get the bits of the bloom filter to test for the given entry.
message BloomFilter {
// The bloom filter data.
BitSequence bits = 1;

// The number of hashes used by the algorithm.
int32 hash_count = 2;
}
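The two comment blocks above fully specify the client-side membership test. A TypeScript sketch of that math, assuming the MD5 hashing of the entry into the unsigned 64-bit pair h1/h2 has already been done elsewhere:

// Number of usable bits: (bitmap.length * 8) - padding.
function bitCount(bitmap: Uint8Array, padding: number): number {
  return bitmap.length * 8 - padding;
}

// Bit n is set iff (bitmap[n / 8] & (0x01 << (n % 8))) != 0.
function isBitSet(bitmap: Uint8Array, n: number): boolean {
  return (bitmap[Math.floor(n / 8)] & (1 << n % 8)) !== 0;
}

// The entry "might be contained" only if every bit h(i) = h1 + (i * h2),
// taken modulo the bit count, is set.
function mightContain(
  bitmap: Uint8Array,
  padding: number,
  hashCount: number,
  h1: bigint,
  h2: bigint,
): boolean {
  const size = BigInt(bitCount(bitmap, padding));
  if (size === 0n) return false;
  for (let i = 0n; i < BigInt(hashCount); i++) {
    const h = BigInt.asUintN(64, h1 + i * h2); // unsigned 64-bit wrap-around
    if (!isBitSet(bitmap, Number(h % size))) return false;
  }
  return true;
}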
90
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/common.proto
generated
vendored
Normal file
90
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/common.proto
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "CommonProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A set of field paths on a document.
|
||||
// Used to restrict a get or update operation on a document to a subset of its
|
||||
// fields.
|
||||
// This is different from standard field masks, as this is always scoped to a
|
||||
// [Document][google.firestore.v1.Document], and takes into account the dynamic
|
||||
// nature of [Value][google.firestore.v1.Value].
|
||||
message DocumentMask {
|
||||
// The list of field paths in the mask. See
|
||||
// [Document.fields][google.firestore.v1.Document.fields] for a field path
|
||||
// syntax reference.
|
||||
repeated string field_paths = 1;
|
||||
}
|
||||
|
||||
// A precondition on a document, used for conditional operations.
|
||||
message Precondition {
|
||||
// The type of precondition.
|
||||
oneof condition_type {
|
||||
// When set to `true`, the target document must exist.
|
||||
// When set to `false`, the target document must not exist.
|
||||
bool exists = 1;
|
||||
|
||||
// When set, the target document must exist and have been last updated at
|
||||
// that time. Timestamp must be microsecond aligned.
|
||||
google.protobuf.Timestamp update_time = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Options for creating a new transaction.
|
||||
message TransactionOptions {
|
||||
// Options for a transaction that can be used to read and write documents.
|
||||
//
|
||||
// Firestore does not allow 3rd party auth requests to create read-write
|
||||
// transactions.
|
||||
message ReadWrite {
|
||||
// An optional transaction to retry.
|
||||
bytes retry_transaction = 1;
|
||||
}
|
||||
|
||||
// Options for a transaction that can only be used to read documents.
|
||||
message ReadOnly {
|
||||
// The consistency mode for this transaction. If not set, defaults to strong
|
||||
// consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents at the given time.
|
||||
//
|
||||
// This must be a microsecond precision timestamp within the past one
|
||||
// hour, or if Point-in-Time Recovery is enabled, can additionally be a
|
||||
// whole minute timestamp within the past 7 days.
|
||||
google.protobuf.Timestamp read_time = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// The mode of the transaction.
|
||||
oneof mode {
|
||||
// The transaction can only be used for read operations.
|
||||
ReadOnly read_only = 2;
|
||||
|
||||
// The transaction can be used for both read and write operations.
|
||||
ReadWrite read_write = 3;
|
||||
}
|
||||
}
|
||||
150
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/document.proto
generated
vendored
Normal file
150
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/document.proto
generated
vendored
Normal file
@@ -0,0 +1,150 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1;

import "google/api/field_behavior.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/type/latlng.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "DocumentProto";
option java_package = "com.google.firestore.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1";
option ruby_package = "Google::Cloud::Firestore::V1";

// A Firestore document.
//
// Must not exceed 1 MiB - 4 bytes.
message Document {
  // The resource name of the document, for example
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  string name = 1;

  // The document's fields.
  //
  // The map keys represent field names.
  //
  // Field names matching the regular expression `__.*__` are reserved. Reserved
  // field names are forbidden except in certain documented contexts. The field
  // names, represented as UTF-8, must not exceed 1,500 bytes and cannot be
  // empty.
  //
  // Field paths may be used in other contexts to refer to structured fields
  // defined here. For `map_value`, the field path is represented by a
  // dot-delimited (`.`) string of segments. Each segment is either a simple
  // field name (defined below) or a quoted field name. For example, the
  // structured field `"foo" : { map_value: { "x&y" : { string_value: "hello"
  // }}}` would be represented by the field path `` foo.`x&y` ``.
  //
  // A simple field name contains only characters `a` to `z`, `A` to `Z`,
  // `0` to `9`, or `_`, and must not start with `0` to `9`. For example,
  // `foo_bar_17`.
  //
  // A quoted field name starts and ends with `` ` `` and
  // may contain any character. Some characters, including `` ` ``, must be
  // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and
  // `` `bak\`tik` `` represents `` bak`tik ``.
  map<string, Value> fields = 2;

  // Output only. The time at which the document was created.
  //
  // This value increases monotonically when a document is deleted then
  // recreated. It can also be compared to values from other documents and
  // the `read_time` of a query.
  google.protobuf.Timestamp create_time = 3;

  // Output only. The time at which the document was last changed.
  //
  // This value is initially set to the `create_time` then increases
  // monotonically with each change to the document. It can also be
  // compared to values from other documents and the `read_time` of a query.
  google.protobuf.Timestamp update_time = 4;
}

// A message that can hold any of the supported value types.
message Value {
  // Must have a value set.
  oneof value_type {
    // A null value.
    google.protobuf.NullValue null_value = 11;

    // A boolean value.
    bool boolean_value = 1;

    // An integer value.
    int64 integer_value = 2;

    // A double value.
    double double_value = 3;

    // A timestamp value.
    //
    // Precise only to microseconds. When stored, any additional precision is
    // rounded down.
    google.protobuf.Timestamp timestamp_value = 10;

    // A string value.
    //
    // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes of the UTF-8 representation are considered by
    // queries.
    string string_value = 17;

    // A bytes value.
    //
    // Must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes are considered by queries.
    bytes bytes_value = 18;

    // A reference to a document. For example:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string reference_value = 5;

    // A geo point value representing a point on the surface of Earth.
    google.type.LatLng geo_point_value = 8;

    // An array value.
    //
    // Cannot directly contain another array value, though can contain a
    // map which contains another array.
    ArrayValue array_value = 9;

    // A map value.
    MapValue map_value = 6;
  }
}

// An array value.
message ArrayValue {
  // Values in the array.
  repeated Value values = 1;
}

// A map value.
message MapValue {
  // The map's fields.
  //
  // The map keys represent field names. Field names matching the regular
  // expression `__.*__` are reserved. Reserved field names are forbidden except
  // in certain documented contexts. The map keys, represented as UTF-8, must
  // not exceed 1,500 bytes and cannot be empty.
  map<string, Value> fields = 1;
}
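For reference, the Value types above are what the Node.js client serializes plain JavaScript values into when a document is written. A small sketch (the `cities/tokyo` document and its fields are made up for illustration):

import { Firestore, GeoPoint, Timestamp } from '@google-cloud/firestore';

const firestore = new Firestore();

async function writeTypedDocument(): Promise<void> {
  await firestore.doc('cities/tokyo').set({
    name: 'Tokyo',                                        // string_value
    population: 13960000,                                 // integer_value
    density: 6363.2,                                      // double_value
    capital: true,                                        // boolean_value
    location: new GeoPoint(35.68, 139.69),                // geo_point_value
    founded: Timestamp.fromDate(new Date('1457-01-01')),  // timestamp_value
    nothing: null,                                        // null_value
    tags: ['asia', 'capital'],                            // array_value (no nested arrays)
    stats: { wards: 23 },                                 // map_value -> field path `stats.wards`
  });
}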
1128
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/firestore.proto
generated
vendored
Normal file
File diff suppressed because it is too large
589
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/query.proto
generated
vendored
Normal file
@@ -0,0 +1,589 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1;

import "google/api/field_behavior.proto";
import "google/firestore/v1/document.proto";
import "google/protobuf/wrappers.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "QueryProto";
option java_package = "com.google.firestore.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1";
option ruby_package = "Google::Cloud::Firestore::V1";

// A Firestore query.
//
// The query stages are executed in the following order:
// 1. from
// 2. where
// 3. select
// 4. order_by + start_at + end_at
// 5. offset
// 6. limit
message StructuredQuery {
  // A selection of a collection, such as `messages as m1`.
  message CollectionSelector {
    // The collection ID.
    // When set, selects only collections with this ID.
    string collection_id = 2;

    // When false, selects only collections that are immediate children of
    // the `parent` specified in the containing `RunQueryRequest`.
    // When true, selects all descendant collections.
    bool all_descendants = 3;
  }

  // A filter.
  message Filter {
    // The type of filter.
    oneof filter_type {
      // A composite filter.
      CompositeFilter composite_filter = 1;

      // A filter on a document field.
      FieldFilter field_filter = 2;

      // A filter that takes exactly one argument.
      UnaryFilter unary_filter = 3;
    }
  }

  // A filter that merges multiple other filters using the given operator.
  message CompositeFilter {
    // A composite filter operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // Documents are required to satisfy all of the combined filters.
      AND = 1;

      // Documents are required to satisfy at least one of the combined filters.
      OR = 2;
    }

    // The operator for combining multiple filters.
    Operator op = 1;

    // The list of filters to combine.
    //
    // Requires:
    //
    // * At least one filter is present.
    repeated Filter filters = 2;
  }

  // A filter on a specific field.
  message FieldFilter {
    // A field filter operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // The given `field` is less than the given `value`.
      //
      // Requires:
      //
      // * That `field` come first in `order_by`.
      LESS_THAN = 1;

      // The given `field` is less than or equal to the given `value`.
      //
      // Requires:
      //
      // * That `field` come first in `order_by`.
      LESS_THAN_OR_EQUAL = 2;

      // The given `field` is greater than the given `value`.
      //
      // Requires:
      //
      // * That `field` come first in `order_by`.
      GREATER_THAN = 3;

      // The given `field` is greater than or equal to the given `value`.
      //
      // Requires:
      //
      // * That `field` come first in `order_by`.
      GREATER_THAN_OR_EQUAL = 4;

      // The given `field` is equal to the given `value`.
      EQUAL = 5;

      // The given `field` is not equal to the given `value`.
      //
      // Requires:
      //
      // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
      // * That `field` comes first in the `order_by`.
      NOT_EQUAL = 6;

      // The given `field` is an array that contains the given `value`.
      ARRAY_CONTAINS = 7;

      // The given `field` is equal to at least one value in the given array.
      //
      // Requires:
      //
      // * That `value` is a non-empty `ArrayValue`, subject to disjunction
      //   limits.
      // * No `NOT_IN` filters in the same query.
      IN = 8;

      // The given `field` is an array that contains any of the values in the
      // given array.
      //
      // Requires:
      //
      // * That `value` is a non-empty `ArrayValue`, subject to disjunction
      //   limits.
      // * No other `ARRAY_CONTAINS_ANY` filters within the same disjunction.
      // * No `NOT_IN` filters in the same query.
      ARRAY_CONTAINS_ANY = 9;

      // The value of the `field` is not in the given array.
      //
      // Requires:
      //
      // * That `value` is a non-empty `ArrayValue` with at most 10 values.
      // * No other `OR`, `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`, `NOT_EQUAL`,
      //   `IS_NOT_NULL`, or `IS_NOT_NAN`.
      // * That `field` comes first in the `order_by`.
      NOT_IN = 10;
    }

    // The field to filter by.
    FieldReference field = 1;

    // The operator to filter by.
    Operator op = 2;

    // The value to compare to.
    Value value = 3;
  }

  // A filter with a single operand.
  message UnaryFilter {
    // A unary operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // The given `field` is equal to `NaN`.
      IS_NAN = 2;

      // The given `field` is equal to `NULL`.
      IS_NULL = 3;

      // The given `field` is not equal to `NaN`.
      //
      // Requires:
      //
      // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
      // * That `field` comes first in the `order_by`.
      IS_NOT_NAN = 4;

      // The given `field` is not equal to `NULL`.
      //
      // Requires:
      //
      // * A single `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
      // * That `field` comes first in the `order_by`.
      IS_NOT_NULL = 5;
    }

    // The unary operator to apply.
    Operator op = 1;

    // The argument to the filter.
    oneof operand_type {
      // The field to which to apply the operator.
      FieldReference field = 2;
    }
  }

  // An order on a field.
  message Order {
    // The field to order by.
    FieldReference field = 1;

    // The direction to order by. Defaults to `ASCENDING`.
    Direction direction = 2;
  }

  // A sort direction.
  enum Direction {
    // Unspecified.
    DIRECTION_UNSPECIFIED = 0;

    // Ascending.
    ASCENDING = 1;

    // Descending.
    DESCENDING = 2;
  }

  // A reference to a field in a document, ex: `stats.operations`.
  message FieldReference {
    // A reference to a field in a document.
    //
    // Requires:
    //
    // * MUST be a dot-delimited (`.`) string of segments, where each segment
    //   conforms to [document field name][google.firestore.v1.Document.fields]
    //   limitations.
    string field_path = 2;
  }

  // The projection of document's fields to return.
  message Projection {
    // The fields to return.
    //
    // If empty, all fields are returned. To only return the name
    // of the document, use `['__name__']`.
    repeated FieldReference fields = 2;
  }

  // Nearest Neighbors search config. The ordering provided by FindNearest
  // supersedes the order_by stage. If multiple documents have the same vector
  // distance, the returned document order is not guaranteed to be stable
  // between queries.
  message FindNearest {
    // The distance measure to use when comparing vectors.
    enum DistanceMeasure {
      // Should not be set.
      DISTANCE_MEASURE_UNSPECIFIED = 0;

      // Measures the EUCLIDEAN distance between the vectors. See
      // [Euclidean](https://en.wikipedia.org/wiki/Euclidean_distance) to learn
      // more. The resulting distance decreases the more similar two vectors
      // are.
      EUCLIDEAN = 1;

      // COSINE distance compares vectors based on the angle between them, which
      // allows you to measure similarity that isn't based on the vectors
      // magnitude. We recommend using DOT_PRODUCT with unit normalized vectors
      // instead of COSINE distance, which is mathematically equivalent with
      // better performance. See [Cosine
      // Similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to learn
      // more about COSINE similarity and COSINE distance. The resulting
      // COSINE distance decreases the more similar two vectors are.
      COSINE = 2;

      // Similar to cosine but is affected by the magnitude of the vectors. See
      // [Dot Product](https://en.wikipedia.org/wiki/Dot_product) to learn more.
      // The resulting distance increases the more similar two vectors are.
      DOT_PRODUCT = 3;
    }

    // Required. An indexed vector field to search upon. Only documents which
    // contain vectors whose dimensionality match the query_vector can be
    // returned.
    FieldReference vector_field = 1 [(google.api.field_behavior) = REQUIRED];

    // Required. The query vector that we are searching on. Must be a vector of
    // no more than 2048 dimensions.
    Value query_vector = 2 [(google.api.field_behavior) = REQUIRED];

    // Required. The distance measure to use, required.
    DistanceMeasure distance_measure = 3
        [(google.api.field_behavior) = REQUIRED];

    // Required. The number of nearest neighbors to return. Must be a positive
    // integer of no more than 1000.
    google.protobuf.Int32Value limit = 4
        [(google.api.field_behavior) = REQUIRED];

    // Optional. Optional name of the field to output the result of the vector
    // distance calculation. Must conform to [document field
    // name][google.firestore.v1.Document.fields] limitations.
    string distance_result_field = 5 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Option to specify a threshold for which no less similar
    // documents will be returned. The behavior of the specified
    // `distance_measure` will affect the meaning of the distance threshold.
    // Since DOT_PRODUCT distances increase when the vectors are more similar,
    // the comparison is inverted.
    //
    // For EUCLIDEAN, COSINE: WHERE distance <= distance_threshold
    // For DOT_PRODUCT:       WHERE distance >= distance_threshold
    google.protobuf.DoubleValue distance_threshold = 6
        [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional sub-set of the fields to return.
  //
  // This acts as a [DocumentMask][google.firestore.v1.DocumentMask] over the
  // documents returned from a query. When not set, assumes that the caller
  // wants all fields returned.
  Projection select = 1;

  // The collections to query.
  repeated CollectionSelector from = 2;

  // The filter to apply.
  Filter where = 3;

  // The order to apply to the query results.
  //
  // Firestore allows callers to provide a full ordering, a partial ordering, or
  // no ordering at all. In all cases, Firestore guarantees a stable ordering
  // through the following rules:
  //
  //  * The `order_by` is required to reference all fields used with an
  //    inequality filter.
  //  * All fields that are required to be in the `order_by` but are not already
  //    present are appended in lexicographical ordering of the field name.
  //  * If an order on `__name__` is not specified, it is appended by default.
  //
  // Fields are appended with the same sort direction as the last order
  // specified, or 'ASCENDING' if no order was specified. For example:
  //
  //  * `ORDER BY a` becomes `ORDER BY a ASC, __name__ ASC`
  //  * `ORDER BY a DESC` becomes `ORDER BY a DESC, __name__ DESC`
  //  * `WHERE a > 1` becomes `WHERE a > 1 ORDER BY a ASC, __name__ ASC`
  //  * `WHERE __name__ > ... AND a > 1` becomes
  //    `WHERE __name__ > ... AND a > 1 ORDER BY a ASC, __name__ ASC`
  repeated Order order_by = 4;

  // A potential prefix of a position in the result set to start the query at.
  //
  // The ordering of the result set is based on the `ORDER BY` clause of the
  // original query.
  //
  // ```
  // SELECT * FROM k WHERE a = 1 AND b > 2 ORDER BY b ASC, __name__ ASC;
  // ```
  //
  // This query's results are ordered by `(b ASC, __name__ ASC)`.
  //
  // Cursors can reference either the full ordering or a prefix of the location,
  // though it cannot reference more fields than what are in the provided
  // `ORDER BY`.
  //
  // Continuing off the example above, attaching the following start cursors
  // will have varying impact:
  //
  // - `START BEFORE (2, /k/123)`: start the query right before `a = 1 AND
  //    b > 2 AND __name__ > /k/123`.
  // - `START AFTER (10)`: start the query right after `a = 1 AND b > 10`.
  //
  // Unlike `OFFSET` which requires scanning over the first N results to skip,
  // a start cursor allows the query to begin at a logical position. This
  // position is not required to match an actual result, it will scan forward
  // from this position to find the next document.
  //
  // Requires:
  //
  // * The number of values cannot be greater than the number of fields
  //   specified in the `ORDER BY` clause.
  Cursor start_at = 7;

  // A potential prefix of a position in the result set to end the query at.
  //
  // This is similar to `START_AT` but with it controlling the end position
  // rather than the start position.
  //
  // Requires:
  //
  // * The number of values cannot be greater than the number of fields
  //   specified in the `ORDER BY` clause.
  Cursor end_at = 8;

  // The number of documents to skip before returning the first result.
  //
  // This applies after the constraints specified by the `WHERE`, `START AT`, &
  // `END AT` but before the `LIMIT` clause.
  //
  // Requires:
  //
  // * The value must be greater than or equal to zero if specified.
  int32 offset = 6;

  // The maximum number of results to return.
  //
  // Applies after all other constraints.
  //
  // Requires:
  //
  // * The value must be greater than or equal to zero if specified.
  google.protobuf.Int32Value limit = 5;

  // Optional. A potential nearest neighbors search.
  //
  // Applies after all other filters and ordering.
  //
  // Finds the closest vector embeddings to the given query vector.
  FindNearest find_nearest = 9 [(google.api.field_behavior) = OPTIONAL];
}
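The client libraries build this StructuredQuery for you. As a sketch against a hypothetical `cities` collection, the Node.js query builder below populates `where`, `order_by`, `select`, `limit`, and a `start_at` cursor roughly as described above:

import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

async function runStructuredQuery(): Promise<void> {
  const page1 = await firestore
    .collection('cities')                      // from
    .where('population', '>', 1_000_000)       // where -> FieldFilter GREATER_THAN
    .orderBy('population', 'desc')             // order_by (__name__ is appended implicitly)
    .select('name', 'population')              // select -> Projection
    .limit(10)                                 // limit
    .get();

  // Cursor-based pagination: a start cursor instead of offset.
  const last = page1.docs[page1.docs.length - 1];
  if (last) {
    const page2 = await firestore
      .collection('cities')
      .where('population', '>', 1_000_000)
      .orderBy('population', 'desc')
      .select('name', 'population')
      .startAfter(last)                        // start_at with before = false
      .limit(10)
      .get();
    console.log(page2.size);
  }
}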
// Firestore query for running an aggregation over a
// [StructuredQuery][google.firestore.v1.StructuredQuery].
message StructuredAggregationQuery {
  // Defines an aggregation that produces a single result.
  message Aggregation {
    // Count of documents that match the query.
    //
    // The `COUNT(*)` aggregation function operates on the entire document
    // so it does not require a field reference.
    message Count {
      // Optional. Optional constraint on the maximum number of documents to
      // count.
      //
      // This provides a way to set an upper bound on the number of documents
      // to scan, limiting latency, and cost.
      //
      // Unspecified is interpreted as no bound.
      //
      // High-Level Example:
      //
      // ```
      // AGGREGATE COUNT_UP_TO(1000) OVER ( SELECT * FROM k );
      // ```
      //
      // Requires:
      //
      // * Must be greater than zero when present.
      google.protobuf.Int64Value up_to = 1
          [(google.api.field_behavior) = OPTIONAL];
    }

    // Sum of the values of the requested field.
    //
    // * Only numeric values will be aggregated. All non-numeric values
    //   including `NULL` are skipped.
    //
    // * If the aggregated values contain `NaN`, returns `NaN`. Infinity math
    //   follows IEEE-754 standards.
    //
    // * If the aggregated value set is empty, returns 0.
    //
    // * Returns a 64-bit integer if all aggregated numbers are integers and the
    //   sum result does not overflow. Otherwise, the result is returned as a
    //   double. Note that even if all the aggregated values are integers, the
    //   result is returned as a double if it cannot fit within a 64-bit signed
    //   integer. When this occurs, the returned value will lose precision.
    //
    // * When underflow occurs, floating-point aggregation is non-deterministic.
    //   This means that running the same query repeatedly without any changes to
    //   the underlying values could produce slightly different results each
    //   time. In those cases, values should be stored as integers over
    //   floating-point numbers.
    message Sum {
      // The field to aggregate on.
      StructuredQuery.FieldReference field = 1;
    }

    // Average of the values of the requested field.
    //
    // * Only numeric values will be aggregated. All non-numeric values
    //   including `NULL` are skipped.
    //
    // * If the aggregated values contain `NaN`, returns `NaN`. Infinity math
    //   follows IEEE-754 standards.
    //
    // * If the aggregated value set is empty, returns `NULL`.
    //
    // * Always returns the result as a double.
    message Avg {
      // The field to aggregate on.
      StructuredQuery.FieldReference field = 1;
    }

    // The type of aggregation to perform, required.
    oneof operator {
      // Count aggregator.
      Count count = 1;

      // Sum aggregator.
      Sum sum = 2;

      // Average aggregator.
      Avg avg = 3;
    }

    // Optional. Optional name of the field to store the result of the
    // aggregation into.
    //
    // If not provided, Firestore will pick a default name following the format
    // `field_<incremental_id++>`. For example:
    //
    // ```
    // AGGREGATE
    //   COUNT_UP_TO(1) AS count_up_to_1,
    //   COUNT_UP_TO(2),
    //   COUNT_UP_TO(3) AS count_up_to_3,
    //   COUNT(*)
    // OVER (
    //   ...
    // );
    // ```
    //
    // becomes:
    //
    // ```
    // AGGREGATE
    //   COUNT_UP_TO(1) AS count_up_to_1,
    //   COUNT_UP_TO(2) AS field_1,
    //   COUNT_UP_TO(3) AS count_up_to_3,
    //   COUNT(*) AS field_2
    // OVER (
    //   ...
    // );
    // ```
    //
    // Requires:
    //
    // * Must be unique across all aggregation aliases.
    // * Conform to [document field name][google.firestore.v1.Document.fields]
    //   limitations.
    string alias = 7 [(google.api.field_behavior) = OPTIONAL];
  }

  // The base query to aggregate over.
  oneof query_type {
    // Nested structured query.
    StructuredQuery structured_query = 1;
  }

  // Optional. Series of aggregations to apply over the results of the
  // `structured_query`.
  //
  // Requires:
  //
  // * A minimum of one and maximum of five aggregations per query.
  repeated Aggregation aggregations = 3
      [(google.api.field_behavior) = OPTIONAL];
}
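Recent releases of the Node.js client expose these aggregations through `count()` and `aggregate()`. A hedged sketch, assuming those helpers are available in the vendored version and using an illustrative `cities` collection; the object keys passed to `aggregate()` become the aliases described above:

import { Firestore, AggregateField } from '@google-cloud/firestore';

const firestore = new Firestore();

async function runAggregations(): Promise<void> {
  const capitals = firestore.collection('cities').where('capital', '==', true);

  // COUNT(*) over the filtered query.
  const countSnap = await capitals.count().get();
  console.log('capitals:', countSnap.data().count);

  // Multiple aggregations in one request (maps to repeated Aggregation above).
  const aggSnap = await capitals
    .aggregate({
      total: AggregateField.count(),
      totalPopulation: AggregateField.sum('population'),
      avgPopulation: AggregateField.average('population'),
    })
    .get();
  console.log(aggSnap.data().totalPopulation, aggSnap.data().avgPopulation);
}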
// A position in a query result set.
message Cursor {
  // The values that represent a position, in the order they appear in
  // the order by clause of a query.
  //
  // Can contain fewer values than specified in the order by clause.
  repeated Value values = 1;

  // If the position is just before or just after the given values, relative
  // to the sort order defined by the query.
  bool before = 2;
}
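The FindNearest stage defined earlier in this file backs the vector-search helper that newer @google-cloud/firestore releases expose. The exact surface has changed across versions, so treat this as an assumption-laden sketch; the `articles` collection, `embedding` field, and 4-dimensional vector are made up:

import { Firestore, FieldValue } from '@google-cloud/firestore';

const firestore = new Firestore();

async function nearestNeighbors(): Promise<void> {
  // Assumes documents were written with a vector field, e.g.
  // { embedding: FieldValue.vector([0.1, 0.2, 0.3, 0.4]) }.
  const vectorQuery = firestore
    .collection('articles')
    .findNearest('embedding', FieldValue.vector([0.1, 0.2, 0.3, 0.4]), {
      limit: 5,
      distanceMeasure: 'COSINE', // EUCLIDEAN | COSINE | DOT_PRODUCT
    });

  const snap = await vectorQuery.get();
  snap.forEach((doc) => console.log(doc.id));
}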
92
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/query_profile.proto
generated
vendored
Normal file
@@ -0,0 +1,92 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1;

import "google/api/field_behavior.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "QueryProfileProto";
option java_package = "com.google.firestore.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1";
option ruby_package = "Google::Cloud::Firestore::V1";

// Specification of the Firestore Query Profile fields.

// Explain options for the query.
message ExplainOptions {
  // Optional. Whether to execute this query.
  //
  // When false (the default), the query will be planned, returning only
  // metrics from the planning stages.
  //
  // When true, the query will be planned and executed, returning the full
  // query results along with both planning and execution stage metrics.
  bool analyze = 1 [(google.api.field_behavior) = OPTIONAL];
}

// Explain metrics for the query.
message ExplainMetrics {
  // Planning phase information for the query.
  PlanSummary plan_summary = 1;

  // Aggregated stats from the execution of the query. Only present when
  // [ExplainOptions.analyze][google.firestore.v1.ExplainOptions.analyze] is set
  // to true.
  ExecutionStats execution_stats = 2;
}

// Planning phase information for the query.
message PlanSummary {
  // The indexes selected for the query. For example:
  //  [
  //    {"query_scope": "Collection", "properties": "(foo ASC, __name__ ASC)"},
  //    {"query_scope": "Collection", "properties": "(bar ASC, __name__ ASC)"}
  //  ]
  repeated google.protobuf.Struct indexes_used = 1;
}

// Execution statistics for the query.
message ExecutionStats {
  // Total number of results returned, including documents, projections,
  // aggregation results, keys.
  int64 results_returned = 1;

  // Total time to execute the query in the backend.
  google.protobuf.Duration execution_duration = 3;

  // Total billable read operations.
  int64 read_operations = 4;

  // Debugging statistics from the execution of the query. Note that the
  // debugging stats are subject to change as Firestore evolves. It could
  // include:
  //  {
  //    "indexes_entries_scanned": "1000",
  //    "documents_scanned": "20",
  //    "billing_details" : {
  //       "documents_billable": "20",
  //       "index_entries_billable": "1000",
  //       "min_query_cost": "0"
  //    }
  //  }
  google.protobuf.Struct debug_stats = 5;
}
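The Node.js client surfaces these profiling messages through `Query.explain()` in recent releases. A sketch, assuming that API is present in the vendored version; the query itself is illustrative:

import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

async function explainQuery(): Promise<void> {
  const query = firestore
    .collection('cities')
    .where('population', '>', 1_000_000);

  // analyze: false -> plan only; analyze: true -> plan and execute
  // (corresponds to ExplainOptions.analyze above).
  const results = await query.explain({ analyze: true });

  console.log('indexes used:', results.metrics.planSummary.indexesUsed);
  console.log('read operations:', results.metrics.executionStats?.readOperations);
  console.log('documents returned:', results.snapshot?.size);
}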
286
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1/write.proto
generated
vendored
Normal file
@@ -0,0 +1,286 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1;

import "google/firestore/v1/bloom_filter.proto";
import "google/firestore/v1/common.proto";
import "google/firestore/v1/document.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1";
option go_package = "cloud.google.com/go/firestore/apiv1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "WriteProto";
option java_package = "com.google.firestore.v1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1";
option ruby_package = "Google::Cloud::Firestore::V1";

// A write on a document.
message Write {
  // The operation to execute.
  oneof operation {
    // A document to write.
    Document update = 1;

    // A document name to delete. In the format:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string delete = 2;

    // Applies a transformation to a document.
    DocumentTransform transform = 6;
  }

  // The fields to update in this write.
  //
  // This field can be set only when the operation is `update`.
  // If the mask is not set for an `update` and the document exists, any
  // existing data will be overwritten.
  // If the mask is set and the document on the server has fields not covered by
  // the mask, they are left unchanged.
  // Fields referenced in the mask, but not present in the input document, are
  // deleted from the document on the server.
  // The field paths in this mask must not contain a reserved field name.
  DocumentMask update_mask = 3;

  // The transforms to perform after update.
  //
  // This field can be set only when the operation is `update`. If present, this
  // write is equivalent to performing `update` and `transform` to the same
  // document atomically and in order.
  repeated DocumentTransform.FieldTransform update_transforms = 7;

  // An optional precondition on the document.
  //
  // The write will fail if this is set and not met by the target document.
  Precondition current_document = 4;
}

// A transformation of a document.
message DocumentTransform {
  // A transformation of a field of the document.
  message FieldTransform {
    // A value that is calculated by the server.
    enum ServerValue {
      // Unspecified. This value must not be used.
      SERVER_VALUE_UNSPECIFIED = 0;

      // The time at which the server processed the request, with millisecond
      // precision. If used on multiple fields (same or different documents) in
      // a transaction, all the fields will get the same server timestamp.
      REQUEST_TIME = 1;
    }

    // The path of the field. See
    // [Document.fields][google.firestore.v1.Document.fields] for the field path
    // syntax reference.
    string field_path = 1;

    // The transformation to apply on the field.
    oneof transform_type {
      // Sets the field to the given server value.
      ServerValue set_to_server_value = 2;

      // Adds the given value to the field's current value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the given value.
      // If either of the given value or the current field value are doubles,
      // both values will be interpreted as doubles. Double arithmetic and
      // representation of double values follow IEEE 754 semantics.
      // If there is positive/negative integer overflow, the field is resolved
      // to the largest magnitude positive/negative integer.
      Value increment = 3;

      // Sets the field to the maximum of its current value and the given value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the given value.
      // If a maximum operation is applied where the field and the input value
      // are of mixed types (that is - one is an integer and one is a double)
      // the field takes on the type of the larger operand. If the operands are
      // equivalent (e.g. 3 and 3.0), the field does not change.
      // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and
      // zero input value is always the stored value.
      // The maximum of any numeric value x and NaN is NaN.
      Value maximum = 4;

      // Sets the field to the minimum of its current value and the given value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the input value.
      // If a minimum operation is applied where the field and the input value
      // are of mixed types (that is - one is an integer and one is a double)
      // the field takes on the type of the smaller operand. If the operands are
      // equivalent (e.g. 3 and 3.0), the field does not change.
      // 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and
      // zero input value is always the stored value.
      // The minimum of any numeric value x and NaN is NaN.
      Value minimum = 5;

      // Append the given elements in order if they are not already present in
      // the current field value.
      // If the field is not an array, or if the field does not yet exist, it is
      // first set to the empty array.
      //
      // Equivalent numbers of different types (e.g. 3L and 3.0) are
      // considered equal when checking if a value is missing.
      // NaN is equal to NaN, and Null is equal to Null.
      // If the input contains multiple equivalent values, only the first will
      // be considered.
      //
      // The corresponding transform_result will be the null value.
      ArrayValue append_missing_elements = 6;

      // Remove all of the given elements from the array in the field.
      // If the field is not an array, or if the field does not yet exist, it is
      // set to the empty array.
      //
      // Equivalent numbers of the different types (e.g. 3L and 3.0) are
      // considered equal when deciding whether an element should be removed.
      // NaN is equal to NaN, and Null is equal to Null.
      // This will remove all equivalent values if there are duplicates.
      //
      // The corresponding transform_result will be the null value.
      ArrayValue remove_all_from_array = 7;
    }
  }

  // The name of the document to transform.
  string document = 1;

  // The list of transformations to apply to the fields of the document, in
  // order.
  // This must not be empty.
  repeated FieldTransform field_transforms = 2;
}

// The result of applying a write.
message WriteResult {
  // The last update time of the document after applying the write. Not set
  // after a `delete`.
  //
  // If the write did not actually change the document, this will be the
  // previous update_time.
  google.protobuf.Timestamp update_time = 1;

  // The results of applying each
  // [DocumentTransform.FieldTransform][google.firestore.v1.DocumentTransform.FieldTransform],
  // in the same order.
  repeated Value transform_results = 2;
}

// A [Document][google.firestore.v1.Document] has changed.
//
// May be the result of multiple [writes][google.firestore.v1.Write], including
// deletes, that ultimately resulted in a new value for the
// [Document][google.firestore.v1.Document].
//
// Multiple [DocumentChange][google.firestore.v1.DocumentChange] messages may be
// returned for the same logical change, if multiple targets are affected.
message DocumentChange {
  // The new state of the [Document][google.firestore.v1.Document].
  //
  // If `mask` is set, contains only fields that were updated or added.
  Document document = 1;

  // A set of target IDs of targets that match this document.
  repeated int32 target_ids = 5;

  // A set of target IDs for targets that no longer match this document.
  repeated int32 removed_target_ids = 6;
}

// A [Document][google.firestore.v1.Document] has been deleted.
//
// May be the result of multiple [writes][google.firestore.v1.Write], including
// updates, the last of which deleted the
// [Document][google.firestore.v1.Document].
//
// Multiple [DocumentDelete][google.firestore.v1.DocumentDelete] messages may be
// returned for the same logical delete, if multiple targets are affected.
message DocumentDelete {
  // The resource name of the [Document][google.firestore.v1.Document] that was
  // deleted.
  string document = 1;

  // A set of target IDs for targets that previously matched this entity.
  repeated int32 removed_target_ids = 6;

  // The read timestamp at which the delete was observed.
  //
  // Greater or equal to the `commit_time` of the delete.
  google.protobuf.Timestamp read_time = 4;
}

// A [Document][google.firestore.v1.Document] has been removed from the view of
// the targets.
//
// Sent if the document is no longer relevant to a target and is out of view.
// Can be sent instead of a DocumentDelete or a DocumentChange if the server
// can not send the new value of the document.
//
// Multiple [DocumentRemove][google.firestore.v1.DocumentRemove] messages may be
// returned for the same logical write or delete, if multiple targets are
// affected.
message DocumentRemove {
  // The resource name of the [Document][google.firestore.v1.Document] that has
  // gone out of view.
  string document = 1;

  // A set of target IDs for targets that previously matched this document.
  repeated int32 removed_target_ids = 2;

  // The read timestamp at which the remove was observed.
  //
  // Greater or equal to the `commit_time` of the change/delete/remove.
  google.protobuf.Timestamp read_time = 4;
}

// A digest of all the documents that match a given target.
message ExistenceFilter {
  // The target ID to which this filter applies.
  int32 target_id = 1;

  // The total count of documents that match
  // [target_id][google.firestore.v1.ExistenceFilter.target_id].
  //
  // If different from the count of documents in the client that match, the
  // client must manually determine which documents no longer match the target.
  //
  // The client can use the `unchanged_names` bloom filter to assist with
  // this determination by testing ALL the document names against the filter;
  // if the document name is NOT in the filter, it means the document no
  // longer matches the target.
  int32 count = 2;

  // A bloom filter that, despite its name, contains the UTF-8 byte encodings of
  // the resource names of ALL the documents that match
  // [target_id][google.firestore.v1.ExistenceFilter.target_id], in the form
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  //
  // This bloom filter may be omitted at the server's discretion, such as if it
  // is deemed that the client will not make use of it or if it is too
  // computationally expensive to calculate or transmit. Clients must gracefully
  // handle this field being absent by falling back to the logic used before
  // this field existed; that is, re-add the target without a resume token to
  // figure out which documents in the client's cache are out of sync.
  BloomFilter unchanged_names = 3;
}
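The FieldTransform operations defined above are what the client's FieldValue sentinels compile down to. A minimal sketch against a hypothetical `posts/p1` document (note that the maximum/minimum transforms have no FieldValue helper in the Node.js client):

import { Firestore, FieldValue } from '@google-cloud/firestore';

const firestore = new Firestore();

async function applyTransforms(): Promise<void> {
  await firestore.doc('posts/p1').update({
    updatedAt: FieldValue.serverTimestamp(),  // set_to_server_value: REQUEST_TIME
    likeCount: FieldValue.increment(1),       // increment
    tags: FieldValue.arrayUnion('firestore'), // append_missing_elements
    flags: FieldValue.arrayRemove('draft'),   // remove_all_from_array
  });
}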
82
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/common.proto
generated
vendored
Normal file
@@ -0,0 +1,82 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1beta1;

import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "CommonProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";

// A set of field paths on a document.
// Used to restrict a get or update operation on a document to a subset of its
// fields.
// This is different from standard field masks, as this is always scoped to a
// [Document][google.firestore.v1beta1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1beta1.Value].
message DocumentMask {
  // The list of field paths in the mask. See [Document.fields][google.firestore.v1beta1.Document.fields] for a field
  // path syntax reference.
  repeated string field_paths = 1;
}

// A precondition on a document, used for conditional operations.
message Precondition {
  // The type of precondition.
  oneof condition_type {
    // When set to `true`, the target document must exist.
    // When set to `false`, the target document must not exist.
    bool exists = 1;

    // When set, the target document must exist and have been last updated at
    // that time.
    google.protobuf.Timestamp update_time = 2;
  }
}

// Options for creating a new transaction.
message TransactionOptions {
  // Options for a transaction that can be used to read and write documents.
  message ReadWrite {
    // An optional transaction to retry.
    bytes retry_transaction = 1;
  }

  // Options for a transaction that can only be used to read documents.
  message ReadOnly {
    // The consistency mode for this transaction. If not set, defaults to strong
    // consistency.
    oneof consistency_selector {
      // Reads documents at the given time.
      // This may not be older than 60 seconds.
      google.protobuf.Timestamp read_time = 2;
    }
  }

  // The mode of the transaction.
  oneof mode {
    // The transaction can only be used for read operations.
    ReadOnly read_only = 2;

    // The transaction can be used for both read and write operations.
    ReadWrite read_write = 3;
  }
}
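Preconditions like the ones defined above are passed as the trailing argument to write calls in the Node.js client. A sketch using `lastUpdateTime` and `exists`, assuming the `exists` precondition supported by recent releases; the document path is illustrative:

import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

async function conditionalWrites(): Promise<void> {
  const ref = firestore.doc('cities/tokyo');
  const snap = await ref.get();

  // Fails with FAILED_PRECONDITION if the document changed since the read
  // (Precondition.update_time).
  await ref.update({ population: 14000000 }, { lastUpdateTime: snap.updateTime! });

  // Only delete if the document still exists (Precondition.exists).
  await ref.delete({ exists: true });
}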
149
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/document.proto
generated
vendored
Normal file
@@ -0,0 +1,149 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1beta1;

import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/type/latlng.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "DocumentProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";

// A Firestore document.
//
// Must not exceed 1 MiB - 4 bytes.
message Document {
  // The resource name of the document, for example
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  string name = 1;

  // The document's fields.
  //
  // The map keys represent field names.
  //
  // A simple field name contains only characters `a` to `z`, `A` to `Z`,
  // `0` to `9`, or `_`, and must not start with `0` to `9`. For example,
  // `foo_bar_17`.
  //
  // Field names matching the regular expression `__.*__` are reserved. Reserved
  // field names are forbidden except in certain documented contexts. The map
  // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be
  // empty.
  //
  // Field paths may be used in other contexts to refer to structured fields
  // defined here. For `map_value`, the field path is represented by the simple
  // or quoted field names of the containing fields, delimited by `.`. For
  // example, the structured field
  // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be
  // represented by the field path `foo.x&y`.
  //
  // Within a field path, a quoted field name starts and ends with `` ` `` and
  // may contain any character. Some characters, including `` ` ``, must be
  // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and
  // `` `bak\`tik` `` represents `` bak`tik ``.
  map<string, Value> fields = 2;

  // Output only. The time at which the document was created.
  //
  // This value increases monotonically when a document is deleted then
  // recreated. It can also be compared to values from other documents and
  // the `read_time` of a query.
  google.protobuf.Timestamp create_time = 3;

  // Output only. The time at which the document was last changed.
  //
  // This value is initially set to the `create_time` then increases
  // monotonically with each change to the document. It can also be
  // compared to values from other documents and the `read_time` of a query.
  google.protobuf.Timestamp update_time = 4;
}

// A message that can hold any of the supported value types.
message Value {
  // Must have a value set.
  oneof value_type {
    // A null value.
    google.protobuf.NullValue null_value = 11;

    // A boolean value.
    bool boolean_value = 1;

    // An integer value.
    int64 integer_value = 2;

    // A double value.
    double double_value = 3;

    // A timestamp value.
    //
    // Precise only to microseconds. When stored, any additional precision is
    // rounded down.
    google.protobuf.Timestamp timestamp_value = 10;

    // A string value.
    //
    // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes of the UTF-8 representation are considered by
    // queries.
    string string_value = 17;

    // A bytes value.
    //
    // Must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes are considered by queries.
    bytes bytes_value = 18;

    // A reference to a document. For example:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string reference_value = 5;

    // A geo point value representing a point on the surface of Earth.
    google.type.LatLng geo_point_value = 8;

    // An array value.
    //
    // Cannot directly contain another array value, though can contain a
    // map which contains another array.
    ArrayValue array_value = 9;

    // A map value.
    MapValue map_value = 6;
  }
}

// An array value.
message ArrayValue {
  // Values in the array.
  repeated Value values = 1;
}

// A map value.
message MapValue {
  // The map's fields.
  //
  // The map keys represent field names. Field names matching the regular
  // expression `__.*__` are reserved. Reserved field names are forbidden except
  // in certain documented contexts. The map keys, represented as UTF-8, must
  // not exceed 1,500 bytes and cannot be empty.
  map<string, Value> fields = 1;
}
900
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/firestore.proto
generated
vendored
Normal file
@@ -0,0 +1,900 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.firestore.v1beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/firestore/v1beta1/common.proto";
import "google/firestore/v1beta1/document.proto";
import "google/firestore/v1beta1/query.proto";
import "google/firestore/v1beta1/write.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";

option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
option java_multiple_files = true;
option java_outer_classname = "FirestoreProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";

// Specification of the Firestore API.

// The Cloud Firestore service.
//
// Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
// document database that simplifies storing, syncing, and querying data for
// your mobile, web, and IoT apps at global scale. Its client libraries provide
// live synchronization and offline support, while its security features and
// integrations with Firebase and Google Cloud Platform (GCP) accelerate
// building truly serverless apps.
service Firestore {
  option (google.api.default_host) = "firestore.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/datastore";

  // Gets a single document.
  rpc GetDocument(GetDocumentRequest) returns (Document) {
    option (google.api.http) = {
      get: "/v1beta1/{name=projects/*/databases/*/documents/*/**}"
    };
  }

  // Lists documents.
  rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) {
    option (google.api.http) = {
      get: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
    };
  }

  // Updates or inserts a document.
  rpc UpdateDocument(UpdateDocumentRequest) returns (Document) {
    option (google.api.http) = {
      patch: "/v1beta1/{document.name=projects/*/databases/*/documents/*/**}"
      body: "document"
    };
    option (google.api.method_signature) = "document,update_mask";
  }

  // Deletes a document.
  rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v1beta1/{name=projects/*/databases/*/documents/*/**}"
    };
    option (google.api.method_signature) = "name";
  }

  // Gets multiple documents.
  //
  // Documents returned by this method are not guaranteed to be returned in the
  // same order that they were requested.
  rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:batchGet"
      body: "*"
    };
  }

  // Starts a new transaction.
  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:beginTransaction"
      body: "*"
    };
    option (google.api.method_signature) = "database";
  }

  // Commits a transaction, while optionally updating documents.
  rpc Commit(CommitRequest) returns (CommitResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:commit"
      body: "*"
    };
    option (google.api.method_signature) = "database,writes";
  }

  // Rolls back a transaction.
  rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:rollback"
      body: "*"
    };
    option (google.api.method_signature) = "database,transaction";
  }

  // Runs a query.
  rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{parent=projects/*/databases/*/documents}:runQuery"
      body: "*"
      additional_bindings {
        post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:runQuery"
        body: "*"
      }
    };
  }

  // Partitions a query by returning partition cursors that can be used to run
  // the query in parallel. The returned partition cursors are split points that
  // can be used by RunQuery as starting/end points for the query results.
  rpc PartitionQuery(PartitionQueryRequest) returns (PartitionQueryResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{parent=projects/*/databases/*/documents}:partitionQuery"
      body: "*"
      additional_bindings {
        post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery"
        body: "*"
      }
    };
  }

  // Streams batches of document updates and deletes, in order.
  rpc Write(stream WriteRequest) returns (stream WriteResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:write"
      body: "*"
    };
  }

  // Listens to changes.
  rpc Listen(stream ListenRequest) returns (stream ListenResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{database=projects/*/databases/*}/documents:listen"
      body: "*"
    };
  }

  // Lists all the collection IDs underneath a document.
  rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{parent=projects/*/databases/*/documents}:listCollectionIds"
      body: "*"
      additional_bindings {
        post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
||||
}
|
||||
|
||||
// Applies a batch of write operations.
|
||||
//
|
||||
// The BatchWrite method does not apply the write operations atomically
// and can apply them out of order. The method does not allow more than one
// write per document. Each write succeeds or fails independently. See the
// [BatchWriteResponse][google.firestore.v1beta1.BatchWriteResponse] for the success status of each write.
|
||||
//
|
||||
// If you require an atomically applied set of writes, use
|
||||
// [Commit][google.firestore.v1beta1.Firestore.Commit] instead.
|
||||
rpc BatchWrite(BatchWriteRequest) returns (BatchWriteResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1beta1/{database=projects/*/databases/*}/documents:batchWrite"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Creates a new document.
|
||||
rpc CreateDocument(CreateDocumentRequest) returns (Document) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1beta1/{parent=projects/*/databases/*/documents/**}/{collection_id}"
|
||||
body: "document"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// The request for [Firestore.GetDocument][google.firestore.v1beta1.Firestore.GetDocument].
|
||||
message GetDocumentRequest {
|
||||
// Required. The resource name of the Document to get. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string name = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If the document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 2;
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads the document in a transaction.
|
||||
bytes transaction = 3;
|
||||
|
||||
// Reads the version of the document at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// The request for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments].
|
||||
message ListDocumentsRequest {
|
||||
// Required. The parent resource name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// For example:
|
||||
// `projects/my-project/databases/my-database/documents` or
|
||||
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`
|
||||
// or `messages`.
|
||||
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The maximum number of documents to return.
|
||||
int32 page_size = 3;
|
||||
|
||||
// The `next_page_token` value returned from a previous List request, if any.
|
||||
string page_token = 4;
|
||||
|
||||
// The order to sort results by. For example: `priority desc, name`.
|
||||
string order_by = 6;
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If a document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 7;
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents in a transaction.
|
||||
bytes transaction = 8;
|
||||
|
||||
// Reads documents as they were at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 10;
|
||||
}
|
||||
|
||||
// If the list should show missing documents. A missing document is a
|
||||
// document that does not exist but has sub-documents. These documents will
|
||||
// be returned with a key but will not have fields, [Document.create_time][google.firestore.v1beta1.Document.create_time],
|
||||
// or [Document.update_time][google.firestore.v1beta1.Document.update_time] set.
|
||||
//
|
||||
// Requests with `show_missing` may not specify `where` or
|
||||
// `order_by`.
|
||||
bool show_missing = 12;
|
||||
}
|
||||
|
||||
// The response for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments].
|
||||
message ListDocumentsResponse {
|
||||
// The Documents found.
|
||||
repeated Document documents = 1;
|
||||
|
||||
// The next page token.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.CreateDocument][google.firestore.v1beta1.Firestore.CreateDocument].
|
||||
message CreateDocumentRequest {
|
||||
// Required. The parent resource. For example:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`.
|
||||
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The client-assigned document ID to use for this document.
|
||||
//
|
||||
// Optional. If not specified, an ID will be assigned by the service.
|
||||
string document_id = 3;
|
||||
|
||||
// Required. The document to create. `name` must not be set.
|
||||
Document document = 4 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If the document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 5;
|
||||
}
|
||||
|
||||
// The request for [Firestore.UpdateDocument][google.firestore.v1beta1.Firestore.UpdateDocument].
|
||||
message UpdateDocumentRequest {
|
||||
// Required. The updated document.
|
||||
// Creates the document if it does not already exist.
|
||||
Document document = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The fields to update.
|
||||
// None of the field paths in the mask may contain a reserved name.
|
||||
//
|
||||
// If the document exists on the server and has fields not referenced in the
|
||||
// mask, they are left unchanged.
|
||||
// Fields referenced in the mask, but not present in the input document, are
|
||||
// deleted from the document on the server.
|
||||
DocumentMask update_mask = 2;
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If the document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 3;
|
||||
|
||||
// An optional precondition on the document.
|
||||
// The request will fail if this is set and not met by the target document.
|
||||
Precondition current_document = 4;
|
||||
}
|
||||
|
||||
// The request for [Firestore.DeleteDocument][google.firestore.v1beta1.Firestore.DeleteDocument].
|
||||
message DeleteDocumentRequest {
|
||||
// Required. The resource name of the Document to delete. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string name = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// An optional precondition on the document.
|
||||
// The request will fail if this is set and not met by the target document.
|
||||
Precondition current_document = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments].
|
||||
message BatchGetDocumentsRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The names of the documents to retrieve. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// The request will fail if any of the documents is not a child resource of
// the given `database`. Duplicate names will be elided.
|
||||
repeated string documents = 2;
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If a document has a field that is not present in this mask, that field will
|
||||
// not be returned in the response.
|
||||
DocumentMask mask = 3;
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents in a transaction.
|
||||
bytes transaction = 4;
|
||||
|
||||
// Starts a new transaction and reads the documents.
|
||||
// Defaults to a read-only transaction.
|
||||
// The new transaction ID will be returned as the first response in the
|
||||
// stream.
|
||||
TransactionOptions new_transaction = 5;
|
||||
|
||||
// Reads documents as they were at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments].
|
||||
message BatchGetDocumentsResponse {
|
||||
// A single result.
|
||||
// This can be empty if the server is just returning a transaction.
|
||||
oneof result {
|
||||
// A document that was requested.
|
||||
Document found = 1;
|
||||
|
||||
// A document name that was requested but does not exist. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string missing = 2;
|
||||
}
|
||||
|
||||
// The transaction that was started as part of this request.
|
||||
// Will only be set in the first response, and only if
|
||||
// [BatchGetDocumentsRequest.new_transaction][google.firestore.v1beta1.BatchGetDocumentsRequest.new_transaction] was set in the request.
|
||||
bytes transaction = 3;
|
||||
|
||||
// The time at which the document was read.
// This may be monotonically increasing; in this case, the previous documents
// in the result stream are guaranteed not to have changed between their
// read_time and this one.
|
||||
google.protobuf.Timestamp read_time = 4;
|
||||
}
|
||||
|
||||
// The request for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction].
|
||||
message BeginTransactionRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The options for the transaction.
|
||||
// Defaults to a read-write transaction.
|
||||
TransactionOptions options = 2;
|
||||
}
|
||||
|
||||
// The response for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction].
|
||||
message BeginTransactionResponse {
|
||||
// The transaction that was started.
|
||||
bytes transaction = 1;
|
||||
}
|
||||
|
||||
// The request for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit].
|
||||
message CommitRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The writes to apply.
|
||||
//
|
||||
// Always executed atomically and in order.
|
||||
repeated Write writes = 2;
|
||||
|
||||
// If set, applies all writes in this transaction, and commits it.
|
||||
bytes transaction = 3;
|
||||
}
|
||||
|
||||
// The response for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit].
|
||||
message CommitResponse {
|
||||
// The result of applying the writes.
|
||||
//
|
||||
// The i-th write result corresponds to the i-th write in the
// request.
|
||||
repeated WriteResult write_results = 1;
|
||||
|
||||
// The time at which the commit occurred. Any read with an equal or greater
|
||||
// `read_time` is guaranteed to see the effects of the commit.
|
||||
google.protobuf.Timestamp commit_time = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.Rollback][google.firestore.v1beta1.Firestore.Rollback].
|
||||
message RollbackRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. The transaction to roll back.
|
||||
bytes transaction = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
}
|
||||
|
||||
// The request for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery].
|
||||
message RunQueryRequest {
|
||||
// Required. The parent resource name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// For example:
|
||||
// `projects/my-project/databases/my-database/documents` or
|
||||
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The query to run.
|
||||
oneof query_type {
|
||||
// A structured query.
|
||||
StructuredQuery structured_query = 2;
|
||||
}
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents in a transaction.
|
||||
bytes transaction = 5;
|
||||
|
||||
// Starts a new transaction and reads the documents.
|
||||
// Defaults to a read-only transaction.
|
||||
// The new transaction ID will be returned as the first response in the
|
||||
// stream.
|
||||
TransactionOptions new_transaction = 6;
|
||||
|
||||
// Reads documents as they were at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// The response for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery].
|
||||
message RunQueryResponse {
|
||||
// The transaction that was started as part of this request.
|
||||
// Can only be set in the first response, and only if
|
||||
// [RunQueryRequest.new_transaction][google.firestore.v1beta1.RunQueryRequest.new_transaction] was set in the request.
|
||||
// If set, no other fields will be set in this response.
|
||||
bytes transaction = 2;
|
||||
|
||||
// A query result.
|
||||
// Not set when reporting partial progress.
|
||||
Document document = 1;
|
||||
|
||||
// The time at which the document was read. This may be monotonically
|
||||
// increasing; in this case, the previous documents in the result stream are
|
||||
// guaranteed not to have changed between their `read_time` and this one.
|
||||
//
|
||||
// If the query returns no results, a response with `read_time` and no
|
||||
// `document` will be sent, and this represents the time at which the query
|
||||
// was run.
|
||||
google.protobuf.Timestamp read_time = 3;
|
||||
|
||||
// The number of results that have been skipped due to an offset between
|
||||
// the last response and the current response.
|
||||
int32 skipped_results = 4;
|
||||
}
|
||||
|
||||
// The request for [Firestore.PartitionQuery][google.firestore.v1beta1.Firestore.PartitionQuery].
|
||||
message PartitionQueryRequest {
|
||||
// Required. The parent resource name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents`.
|
||||
// Document resource names are not supported; only database resource names
|
||||
// can be specified.
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The query to partition.
|
||||
oneof query_type {
|
||||
// A structured query.
|
||||
// Query must specify collection with all descendants and be ordered by name
|
||||
// ascending. Other filters, order bys, limits, offsets, and start/end
|
||||
// cursors are not supported.
|
||||
StructuredQuery structured_query = 2;
|
||||
}
|
||||
|
||||
// The desired maximum number of partition points.
|
||||
// The partitions may be returned across multiple pages of results.
|
||||
// The number must be positive. The actual number of partitions
|
||||
// returned may be fewer.
|
||||
//
|
||||
// For example, this may be set to one fewer than the number of parallel
// queries to be run, or, when running a data pipeline job, one fewer than
// the number of workers or compute instances available.
|
||||
int64 partition_count = 3;
|
||||
|
||||
// The `next_page_token` value returned from a previous call to
|
||||
// PartitionQuery that may be used to get an additional set of results.
|
||||
// There are no ordering guarantees between sets of results. Thus, using
|
||||
// multiple sets of results will require merging the different result sets.
|
||||
//
|
||||
// For example, two subsequent calls using a page_token may return:
|
||||
//
|
||||
// * cursor B, cursor M, cursor Q
|
||||
// * cursor A, cursor U, cursor W
|
||||
//
|
||||
// To obtain a complete result set ordered with respect to the results of the
|
||||
// query supplied to PartitionQuery, the results sets should be merged:
|
||||
// cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
|
||||
string page_token = 4;
|
||||
|
||||
// The maximum number of partitions to return in this call, subject to
|
||||
// `partition_count`.
|
||||
//
|
||||
// For example, if `partition_count` = 10 and `page_size` = 8, the first call
|
||||
// to PartitionQuery will return up to 8 partitions and a `next_page_token`
|
||||
// if more results exist. A second call to PartitionQuery will return up to
|
||||
// 2 partitions, to complete the total of 10 specified in `partition_count`.
|
||||
int32 page_size = 5;
|
||||
}
|
||||
|
||||
// The response for [Firestore.PartitionQuery][google.firestore.v1beta1.Firestore.PartitionQuery].
|
||||
message PartitionQueryResponse {
|
||||
// Partition results.
|
||||
// Each partition is a split point that can be used by RunQuery as a starting
|
||||
// or end point for the query results. The RunQuery requests must be made with
|
||||
// the same query supplied to this PartitionQuery request. The partition
|
||||
// cursors will be ordered according to the same ordering as the results of
// the query supplied to PartitionQuery.
|
||||
//
|
||||
// For example, if a PartitionQuery request returns partition cursors A and B,
|
||||
// running the following three queries will return the entire result set of
|
||||
// the original query:
|
||||
//
|
||||
// * query, end_at A
|
||||
// * query, start_at A, end_at B
|
||||
// * query, start_at B
|
||||
//
|
||||
// An empty result may indicate that the query has too few results to be
|
||||
// partitioned.
|
||||
repeated Cursor partitions = 1;
|
||||
|
||||
// A page token that may be used to request an additional set of results, up
|
||||
// to the number specified by `partition_count` in the PartitionQuery request.
|
||||
// If blank, there are no more results.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.Write][google.firestore.v1beta1.Firestore.Write].
|
||||
//
|
||||
// The first request creates a stream, or resumes an existing one from a token.
|
||||
//
|
||||
// When creating a new stream, the server replies with a response containing
|
||||
// only an ID and a token, to use in the next request.
|
||||
//
|
||||
// When resuming a stream, the server first streams any responses later than the
|
||||
// given token, then a response containing only an up-to-date token, to use in
|
||||
// the next request.
|
||||
message WriteRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
// This is only required in the first message.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The ID of the write stream to resume.
|
||||
// This may only be set in the first message. When left empty, a new write
|
||||
// stream will be created.
|
||||
string stream_id = 2;
|
||||
|
||||
// The writes to apply.
|
||||
//
|
||||
// Always executed atomically and in order.
|
||||
// This must be empty on the first request.
|
||||
// This may be empty on the last request.
|
||||
// This must not be empty on all other requests.
|
||||
repeated Write writes = 3;
|
||||
|
||||
// A stream token that was previously sent by the server.
|
||||
//
|
||||
// The client should set this field to the token from the most recent
|
||||
// [WriteResponse][google.firestore.v1beta1.WriteResponse] it has received. This acknowledges that the client has
|
||||
// received responses up to this token. After sending this token, earlier
|
||||
// tokens may not be used anymore.
|
||||
//
|
||||
// The server may close the stream if there are too many unacknowledged
|
||||
// responses.
|
||||
//
|
||||
// Leave this field unset when creating a new stream. To resume a stream at
// a specific point, set this field and the `stream_id` field.
|
||||
bytes stream_token = 4;
|
||||
|
||||
// Labels associated with this write request.
|
||||
map<string, string> labels = 5;
|
||||
}
|
||||
|
||||
// The response for [Firestore.Write][google.firestore.v1beta1.Firestore.Write].
|
||||
message WriteResponse {
|
||||
// The ID of the stream.
|
||||
// Only set on the first message, when a new stream was created.
|
||||
string stream_id = 1;
|
||||
|
||||
// A token that represents the position of this response in the stream.
|
||||
// This can be used by a client to resume the stream at this point.
|
||||
//
|
||||
// This field is always set.
|
||||
bytes stream_token = 2;
|
||||
|
||||
// The result of applying the writes.
|
||||
//
|
||||
// The i-th write result corresponds to the i-th write in the
// request.
|
||||
repeated WriteResult write_results = 3;
|
||||
|
||||
// The time at which the commit occurred. Any read with an equal or greater
|
||||
// `read_time` is guaranteed to see the effects of the write.
|
||||
google.protobuf.Timestamp commit_time = 4;
|
||||
}
|
||||
|
||||
// A request for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen]
|
||||
message ListenRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The supported target changes.
|
||||
oneof target_change {
|
||||
// A target to add to this stream.
|
||||
Target add_target = 2;
|
||||
|
||||
// The ID of a target to remove from this stream.
|
||||
int32 remove_target = 3;
|
||||
}
|
||||
|
||||
// Labels associated with this target change.
|
||||
map<string, string> labels = 4;
|
||||
}
|
||||
|
||||
// The response for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen].
|
||||
message ListenResponse {
|
||||
// The supported responses.
|
||||
oneof response_type {
|
||||
// Targets have changed.
|
||||
TargetChange target_change = 2;
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has changed.
|
||||
DocumentChange document_change = 3;
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has been deleted.
|
||||
DocumentDelete document_delete = 4;
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has been removed from a target (because it is no longer
|
||||
// relevant to that target).
|
||||
DocumentRemove document_remove = 6;
|
||||
|
||||
// A filter to apply to the set of documents previously returned for the
|
||||
// given target.
|
||||
//
|
||||
// Returned when documents may have been removed from the given target, but
|
||||
// the exact documents are unknown.
|
||||
ExistenceFilter filter = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// A specification of a set of documents to listen to.
|
||||
message Target {
|
||||
// A target specified by a set of document names.
|
||||
message DocumentsTarget {
|
||||
// The names of the documents to retrieve. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// The request will fail if any of the documents is not a child resource of
// the given `database`. Duplicate names will be elided.
|
||||
repeated string documents = 2;
|
||||
}
|
||||
|
||||
// A target specified by a query.
|
||||
message QueryTarget {
|
||||
// The parent resource name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// For example:
|
||||
// `projects/my-project/databases/my-database/documents` or
|
||||
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
|
||||
string parent = 1;
|
||||
|
||||
// The query to run.
|
||||
oneof query_type {
|
||||
// A structured query.
|
||||
StructuredQuery structured_query = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// The type of target to listen to.
|
||||
oneof target_type {
|
||||
// A target specified by a query.
|
||||
QueryTarget query = 2;
|
||||
|
||||
// A target specified by a set of document names.
|
||||
DocumentsTarget documents = 3;
|
||||
}
|
||||
|
||||
// When to start listening.
|
||||
//
|
||||
// If not specified, all matching Documents are returned before any
|
||||
// subsequent changes.
|
||||
oneof resume_type {
|
||||
// A resume token from a prior [TargetChange][google.firestore.v1beta1.TargetChange] for an identical target.
|
||||
//
|
||||
// Using a resume token with a different target is unsupported and may fail.
|
||||
bytes resume_token = 4;
|
||||
|
||||
// Start listening after a specific `read_time`.
|
||||
//
|
||||
// The client must know the state of matching documents at this time.
|
||||
google.protobuf.Timestamp read_time = 11;
|
||||
}
|
||||
|
||||
// The target ID that identifies the target on the stream. Must be a positive,
// non-zero number.
|
||||
int32 target_id = 5;
|
||||
|
||||
// If the target should be removed once it is current and consistent.
|
||||
bool once = 6;
|
||||
}
|
||||
|
||||
// Targets being watched have changed.
|
||||
message TargetChange {
|
||||
// The type of change.
|
||||
enum TargetChangeType {
|
||||
// No change has occurred. Used only to send an updated `resume_token`.
|
||||
NO_CHANGE = 0;
|
||||
|
||||
// The targets have been added.
|
||||
ADD = 1;
|
||||
|
||||
// The targets have been removed.
|
||||
REMOVE = 2;
|
||||
|
||||
// The targets reflect all changes committed before the targets were added
|
||||
// to the stream.
|
||||
//
|
||||
// This will be sent after or with a `read_time` that is greater than or
|
||||
// equal to the time at which the targets were added.
|
||||
//
|
||||
// Listeners can wait for this change if read-after-write semantics
|
||||
// are desired.
|
||||
CURRENT = 3;
|
||||
|
||||
// The targets have been reset, and a new initial state for the targets
|
||||
// will be returned in subsequent changes.
|
||||
//
|
||||
// After the initial state is complete, `CURRENT` will be returned even
|
||||
// if the target was previously indicated to be `CURRENT`.
|
||||
RESET = 4;
|
||||
}
|
||||
|
||||
// The type of change that occurred.
|
||||
TargetChangeType target_change_type = 1;
|
||||
|
||||
// The target IDs of targets that have changed.
|
||||
//
|
||||
// If empty, the change applies to all targets.
|
||||
//
|
||||
// The order of the target IDs is not defined.
|
||||
repeated int32 target_ids = 2;
|
||||
|
||||
// The error that resulted in this change, if applicable.
|
||||
google.rpc.Status cause = 3;
|
||||
|
||||
// A token that can be used to resume the stream for the given `target_ids`,
|
||||
// or all targets if `target_ids` is empty.
|
||||
//
|
||||
// Not set on every target change.
|
||||
bytes resume_token = 4;
|
||||
|
||||
// The consistent `read_time` for the given `target_ids` (omitted when the
|
||||
// target_ids are not at a consistent snapshot).
|
||||
//
|
||||
// The stream is guaranteed to send a `read_time` with `target_ids` empty
|
||||
// whenever the entire stream reaches a new consistent snapshot. ADD,
|
||||
// CURRENT, and RESET messages are guaranteed to (eventually) result in a
|
||||
// new consistent snapshot (while NO_CHANGE and REMOVE messages are not).
|
||||
//
|
||||
// For a given stream, `read_time` is guaranteed to be monotonically
|
||||
// increasing.
|
||||
google.protobuf.Timestamp read_time = 6;
|
||||
}
|
||||
|
||||
// The request for [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds].
|
||||
message ListCollectionIdsRequest {
|
||||
// Required. The parent document. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// For example:
|
||||
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The maximum number of results to return.
|
||||
int32 page_size = 2;
|
||||
|
||||
// A page token. Must be a value from
|
||||
// [ListCollectionIdsResponse][google.firestore.v1beta1.ListCollectionIdsResponse].
|
||||
string page_token = 3;
|
||||
}
|
||||
|
||||
// The response from [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds].
|
||||
message ListCollectionIdsResponse {
|
||||
// The collection ids.
|
||||
repeated string collection_ids = 1;
|
||||
|
||||
// A page token that may be used to continue the list.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.BatchWrite][google.firestore.v1beta1.Firestore.BatchWrite].
|
||||
message BatchWriteRequest {
|
||||
// Required. The database name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}`.
|
||||
string database = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The writes to apply.
|
||||
//
|
||||
// The method does not apply writes atomically and does not guarantee ordering.
|
||||
// Each write succeeds or fails independently. You cannot write to the same
|
||||
// document more than once per request.
|
||||
repeated Write writes = 2;
|
||||
|
||||
// Labels associated with this batch write.
|
||||
map<string, string> labels = 3;
|
||||
}
|
||||
|
||||
// The response from [Firestore.BatchWrite][google.firestore.v1beta1.Firestore.BatchWrite].
|
||||
message BatchWriteResponse {
|
||||
// The result of applying the writes.
|
||||
//
|
||||
// The i-th write result corresponds to the i-th write in the
// request.
|
||||
repeated WriteResult write_results = 1;
|
||||
|
||||
// The status of applying the writes.
|
||||
//
|
||||
// The i-th write status corresponds to the i-th write in the
// request.
|
||||
repeated google.rpc.Status status = 2;
|
||||
}
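
For orientation, a minimal TypeScript sketch of how the public @google-cloud/firestore Node.js client (the package this vendored file ships with) surfaces several of the RPCs defined above. The shipped client targets the v1 API rather than v1beta1, and the document paths used here ('chatrooms/...', 'counters/global') are placeholders.

import {Firestore} from '@google-cloud/firestore';

async function main(): Promise<void> {
  const db = new Firestore();

  // GetDocument: read a single document by its path.
  const snap = await db.doc('chatrooms/my-chatroom').get();
  console.log(snap.exists ? snap.data() : 'missing');

  // BatchGetDocuments: fetch several documents in one streamed call.
  const [a, b] = await db.getAll(db.doc('chatrooms/a'), db.doc('chatrooms/b'));
  console.log(a.id, b.id);

  // ListCollectionIds: list the collections directly under a document.
  const collections = await db.doc('chatrooms/my-chatroom').listCollections();
  console.log(collections.map((c) => c.id));

  // BeginTransaction + Commit/Rollback, wrapped by runTransaction().
  await db.runTransaction(async (tx) => {
    const counter = await tx.get(db.doc('counters/global'));
    tx.update(db.doc('counters/global'), {count: (counter.get('count') ?? 0) + 1});
  });
}

main().catch(console.error);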
|
||||
300
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/query.proto
generated
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1beta1;
|
||||
|
||||
import "google/firestore/v1beta1/document.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "QueryProto";
|
||||
option java_package = "com.google.firestore.v1beta1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1beta1";
|
||||
|
||||
// A Firestore query.
|
||||
message StructuredQuery {
|
||||
// A selection of a collection, such as `messages as m1`.
|
||||
message CollectionSelector {
|
||||
// The collection ID.
|
||||
// When set, selects only collections with this ID.
|
||||
string collection_id = 2;
|
||||
|
||||
// When false, selects only collections that are immediate children of
|
||||
// the `parent` specified in the containing `RunQueryRequest`.
|
||||
// When true, selects all descendant collections.
|
||||
bool all_descendants = 3;
|
||||
}
|
||||
|
||||
// A filter.
|
||||
message Filter {
|
||||
// The type of filter.
|
||||
oneof filter_type {
|
||||
// A composite filter.
|
||||
CompositeFilter composite_filter = 1;
|
||||
|
||||
// A filter on a document field.
|
||||
FieldFilter field_filter = 2;
|
||||
|
||||
// A filter that takes exactly one argument.
|
||||
UnaryFilter unary_filter = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// A filter that merges multiple other filters using the given operator.
|
||||
message CompositeFilter {
|
||||
// A composite filter operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The results are required to satisfy each of the combined filters.
|
||||
AND = 1;
|
||||
}
|
||||
|
||||
// The operator for combining multiple filters.
|
||||
Operator op = 1;
|
||||
|
||||
// The list of filters to combine.
|
||||
// Must contain at least one filter.
|
||||
repeated Filter filters = 2;
|
||||
}
|
||||
|
||||
// A filter on a specific field.
|
||||
message FieldFilter {
|
||||
// A field filter operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The given `field` is less than the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
LESS_THAN = 1;
|
||||
|
||||
// The given `field` is less than or equal to the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
LESS_THAN_OR_EQUAL = 2;
|
||||
|
||||
// The given `field` is greater than the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
GREATER_THAN = 3;
|
||||
|
||||
// The given `field` is greater than or equal to the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
GREATER_THAN_OR_EQUAL = 4;
|
||||
|
||||
// The given `field` is equal to the given `value`.
|
||||
EQUAL = 5;
|
||||
|
||||
// The given `field` is not equal to the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
|
||||
// * That `field` comes first in the `order_by`.
|
||||
NOT_EQUAL = 6;
|
||||
|
||||
// The given `field` is an array that contains the given `value`.
|
||||
ARRAY_CONTAINS = 7;
|
||||
|
||||
// The given `field` is equal to at least one value in the given array.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `value` is a non-empty `ArrayValue` with at most 10 values.
|
||||
// * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`.
|
||||
IN = 8;
|
||||
|
||||
// The given `field` is an array that contains any of the values in the
|
||||
// given array.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `value` is a non-empty `ArrayValue` with at most 10 values.
|
||||
// * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`.
|
||||
ARRAY_CONTAINS_ANY = 9;
|
||||
|
||||
// The value of the `field` is not in the given array.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `value` is a non-empty `ArrayValue` with at most 10 values.
|
||||
// * No other `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`, `NOT_EQUAL`,
|
||||
// `IS_NOT_NULL`, or `IS_NOT_NAN`.
|
||||
// * That `field` comes first in the `order_by`.
|
||||
NOT_IN = 10;
|
||||
}
|
||||
|
||||
// The field to filter by.
|
||||
FieldReference field = 1;
|
||||
|
||||
// The operator to filter by.
|
||||
Operator op = 2;
|
||||
|
||||
// The value to compare to.
|
||||
Value value = 3;
|
||||
}
|
||||
|
||||
// A filter with a single operand.
|
||||
message UnaryFilter {
|
||||
// A unary operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The given `field` is equal to `NaN`.
|
||||
IS_NAN = 2;
|
||||
|
||||
// The given `field` is equal to `NULL`.
|
||||
IS_NULL = 3;
|
||||
|
||||
// The given `field` is not equal to `NaN`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
|
||||
// * That `field` comes first in the `order_by`.
|
||||
IS_NOT_NAN = 4;
|
||||
|
||||
// The given `field` is not equal to `NULL`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * A single `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`.
|
||||
// * That `field` comes first in the `order_by`.
|
||||
IS_NOT_NULL = 5;
|
||||
}
|
||||
|
||||
// The unary operator to apply.
|
||||
Operator op = 1;
|
||||
|
||||
// The argument to the filter.
|
||||
oneof operand_type {
|
||||
// The field to which to apply the operator.
|
||||
FieldReference field = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// A reference to a field, such as `max(messages.time) as max_time`.
|
||||
message FieldReference {
|
||||
string field_path = 2;
|
||||
}
|
||||
|
||||
// An order on a field.
|
||||
message Order {
|
||||
// The field to order by.
|
||||
FieldReference field = 1;
|
||||
|
||||
// The direction to order by. Defaults to `ASCENDING`.
|
||||
Direction direction = 2;
|
||||
}
|
||||
|
||||
// The projection of a document's fields to return.
|
||||
message Projection {
|
||||
// The fields to return.
|
||||
//
|
||||
// If empty, all fields are returned. To only return the name
|
||||
// of the document, use `['__name__']`.
|
||||
repeated FieldReference fields = 2;
|
||||
}
|
||||
|
||||
// A sort direction.
|
||||
enum Direction {
|
||||
// Unspecified.
|
||||
DIRECTION_UNSPECIFIED = 0;
|
||||
|
||||
// Ascending.
|
||||
ASCENDING = 1;
|
||||
|
||||
// Descending.
|
||||
DESCENDING = 2;
|
||||
}
|
||||
|
||||
// The projection to return.
|
||||
Projection select = 1;
|
||||
|
||||
// The collections to query.
|
||||
repeated CollectionSelector from = 2;
|
||||
|
||||
// The filter to apply.
|
||||
Filter where = 3;
|
||||
|
||||
// The order to apply to the query results.
|
||||
//
|
||||
// Firestore guarantees a stable ordering through the following rules:
|
||||
//
|
||||
// * Any field required to appear in `order_by` that is not already
// specified in `order_by` is appended to the order, in field name order,
// by default.
|
||||
// * If an order on `__name__` is not specified, it is appended by default.
|
||||
//
|
||||
// Fields are appended with the same sort direction as the last order
|
||||
// specified, or 'ASCENDING' if no order was specified. For example:
|
||||
//
|
||||
// * `SELECT * FROM Foo ORDER BY A` becomes
|
||||
// `SELECT * FROM Foo ORDER BY A, __name__`
|
||||
// * `SELECT * FROM Foo ORDER BY A DESC` becomes
|
||||
// `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC`
|
||||
// * `SELECT * FROM Foo WHERE A > 1` becomes
|
||||
// `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__`
|
||||
repeated Order order_by = 4;
|
||||
|
||||
// A starting point for the query results.
|
||||
Cursor start_at = 7;
|
||||
|
||||
// An end point for the query results.
|
||||
Cursor end_at = 8;
|
||||
|
||||
// The number of results to skip.
|
||||
//
|
||||
// Applies before limit, but after all other constraints. Must be >= 0 if
|
||||
// specified.
|
||||
int32 offset = 6;
|
||||
|
||||
// The maximum number of results to return.
|
||||
//
|
||||
// Applies after all other constraints.
|
||||
// Must be >= 0 if specified.
|
||||
google.protobuf.Int32Value limit = 5;
|
||||
}
|
||||
|
||||
// A position in a query result set.
|
||||
message Cursor {
|
||||
// The values that represent a position, in the order they appear in
|
||||
// the order by clause of a query.
|
||||
//
|
||||
// Can contain fewer values than specified in the order by clause.
|
||||
repeated Value values = 1;
|
||||
|
||||
// If the position is just before or just after the given values, relative
|
||||
// to the sort order defined by the query.
|
||||
bool before = 2;
|
||||
}
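
As a rough illustration, the Node.js client's fluent query builder populates the StructuredQuery fields defined above; the collection and field names below are placeholders.

import {Firestore} from '@google-cloud/firestore';

const db = new Firestore();

// from:      CollectionSelector { collection_id: "messages" }
// where:     FieldFilter { field: "priority", op: GREATER_THAN, value: 1 }
// order_by:  Order { field: "priority", direction: DESCENDING } (__name__ appended)
// offset / limit: int32 offset, Int32Value limit
const query = db
  .collection('messages')
  .where('priority', '>', 1)
  .orderBy('priority', 'desc')
  .offset(10)
  .limit(20);

query.get().then((snapshot) => {
  snapshot.forEach((doc) => console.log(doc.id, doc.get('priority')));
});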
|
||||
75
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/undeliverable_first_gen_event.proto
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1beta1;
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "UndeliverableFirstGenEventProto";
|
||||
option java_package = "com.google.firestore.v1beta1";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1beta1";
|
||||
|
||||
// A message signifying an event that cannot be delivered to Cloud Functions
|
||||
// from Firestore using [Cloud Firestore triggers 1st
|
||||
// gen](https://cloud.google.com/functions/docs/calling/cloud-firestore)
|
||||
message UndeliverableFirstGenEvent {
|
||||
// Reason for events being undeliverable.
|
||||
enum Reason {
|
||||
// Unspecified.
|
||||
REASON_UNSPECIFIED = 0;
|
||||
|
||||
// The event exceeds the maximum event size limit.
|
||||
EXCEEDING_SIZE_LIMIT = 1;
|
||||
}
|
||||
|
||||
// Document change type.
|
||||
enum DocumentChangeType {
|
||||
// Unspecified.
|
||||
DOCUMENT_CHANGE_TYPE_UNSPECIFIED = 0;
|
||||
|
||||
// Represents a create operation.
CREATE = 1;

// Represents a delete operation.
DELETE = 2;

// Represents an update operation.
UPDATE = 3;
|
||||
}
|
||||
|
||||
// Error message for events being undeliverable.
|
||||
string message = 1;
|
||||
|
||||
// Reason for events being undeliverable.
|
||||
Reason reason = 2;
|
||||
|
||||
// The resource name of the changed document, in the format of
|
||||
// `projects/{projectId}/databases/{databaseId}/documents/{document_path}`.
|
||||
string document_name = 3;
|
||||
|
||||
// The type of the document change.
|
||||
DocumentChangeType document_change_type = 4;
|
||||
|
||||
// The names of the functions that were supposed to be triggered.
|
||||
repeated string function_name = 5;
|
||||
|
||||
// The commit time of the triggered write operation.
|
||||
google.protobuf.Timestamp triggered_time = 6;
|
||||
}
|
||||
258
server/node_modules/@google-cloud/firestore/build/protos/google/firestore/v1beta1/write.proto
generated
vendored
Normal file
@@ -0,0 +1,258 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1beta1;
|
||||
|
||||
import "google/firestore/v1beta1/common.proto";
|
||||
import "google/firestore/v1beta1/document.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
|
||||
option go_package = "cloud.google.com/go/firestore/apiv1beta1/firestorepb;firestorepb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "WriteProto";
|
||||
option java_package = "com.google.firestore.v1beta1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1beta1";
|
||||
|
||||
// A write on a document.
|
||||
message Write {
|
||||
// The operation to execute.
|
||||
oneof operation {
|
||||
// A document to write.
|
||||
Document update = 1;
|
||||
|
||||
// A document name to delete. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string delete = 2;
|
||||
|
||||
// Applies a transformation to a document.
|
||||
DocumentTransform transform = 6;
|
||||
}
|
||||
|
||||
// The fields to update in this write.
|
||||
//
|
||||
// This field can be set only when the operation is `update`.
|
||||
// If the mask is not set for an `update` and the document exists, any
|
||||
// existing data will be overwritten.
|
||||
// If the mask is set and the document on the server has fields not covered by
|
||||
// the mask, they are left unchanged.
|
||||
// Fields referenced in the mask, but not present in the input document, are
|
||||
// deleted from the document on the server.
|
||||
// The field paths in this mask must not contain a reserved field name.
|
||||
DocumentMask update_mask = 3;
|
||||
|
||||
// The transforms to perform after update.
|
||||
//
|
||||
// This field can be set only when the operation is `update`. If present, this
|
||||
// write is equivalent to performing `update` and `transform` to the same
|
||||
// document atomically and in order.
|
||||
repeated DocumentTransform.FieldTransform update_transforms = 7;
|
||||
|
||||
// An optional precondition on the document.
|
||||
//
|
||||
// The write will fail if this is set and not met by the target document.
|
||||
Precondition current_document = 4;
|
||||
}
|
||||
|
||||
// A transformation of a document.
|
||||
message DocumentTransform {
|
||||
// A transformation of a field of the document.
|
||||
message FieldTransform {
|
||||
// A value that is calculated by the server.
|
||||
enum ServerValue {
|
||||
// Unspecified. This value must not be used.
|
||||
SERVER_VALUE_UNSPECIFIED = 0;
|
||||
|
||||
// The time at which the server processed the request, with millisecond
|
||||
// precision. If used on multiple fields (same or different documents) in
|
||||
// a transaction, all the fields will get the same server timestamp.
|
||||
REQUEST_TIME = 1;
|
||||
}
|
||||
|
||||
// The path of the field. See [Document.fields][google.firestore.v1beta1.Document.fields] for the field path syntax
|
||||
// reference.
|
||||
string field_path = 1;
|
||||
|
||||
// The transformation to apply on the field.
|
||||
oneof transform_type {
|
||||
// Sets the field to the given server value.
|
||||
ServerValue set_to_server_value = 2;
|
||||
|
||||
// Adds the given value to the field's current value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the given value.
|
||||
// If either of the given value or the current field value are doubles,
|
||||
// both values will be interpreted as doubles. Double arithmetic and
|
||||
// representation of double values follow IEEE 754 semantics.
|
||||
// If there is positive/negative integer overflow, the field is resolved
|
||||
// to the largest magnitude positive/negative integer.
|
||||
Value increment = 3;
|
||||
|
||||
// Sets the field to the maximum of its current value and the given value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the given value.
|
||||
// If a maximum operation is applied where the field and the input value
|
||||
// are of mixed types (that is, one is an integer and one is a double),
// the field takes on the type of the larger operand. If the operands are
|
||||
// equivalent (e.g. 3 and 3.0), the field does not change.
|
||||
// 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and
|
||||
// zero input value is always the stored value.
|
||||
// The maximum of any numeric value x and NaN is NaN.
|
||||
Value maximum = 4;
|
||||
|
||||
// Sets the field to the minimum of its current value and the given value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the input value.
|
||||
// If a minimum operation is applied where the field and the input value
|
||||
// are of mixed types (that is, one is an integer and one is a double),
// the field takes on the type of the smaller operand. If the operands are
|
||||
// equivalent (e.g. 3 and 3.0), the field does not change.
|
||||
// 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and
|
||||
// zero input value is always the stored value.
|
||||
// The minimum of any numeric value x and NaN is NaN.
|
||||
Value minimum = 5;
|
||||
|
||||
// Append the given elements in order if they are not already present in
|
||||
// the current field value.
|
||||
// If the field is not an array, or if the field does not yet exist, it is
|
||||
// first set to the empty array.
|
||||
//
|
||||
// Equivalent numbers of different types (e.g. 3L and 3.0) are
|
||||
// considered equal when checking if a value is missing.
|
||||
// NaN is equal to NaN, and Null is equal to Null.
|
||||
// If the input contains multiple equivalent values, only the first will
|
||||
// be considered.
|
||||
//
|
||||
// The corresponding transform_result will be the null value.
|
||||
ArrayValue append_missing_elements = 6;
|
||||
|
||||
// Remove all of the given elements from the array in the field.
|
||||
// If the field is not an array, or if the field does not yet exist, it is
|
||||
// set to the empty array.
|
||||
//
|
||||
// Equivalent numbers of different types (e.g. 3L and 3.0) are
|
||||
// considered equal when deciding whether an element should be removed.
|
||||
// NaN is equal to NaN, and Null is equal to Null.
|
||||
// This will remove all equivalent values if there are duplicates.
|
||||
//
|
||||
// The corresponding transform_result will be the null value.
|
||||
ArrayValue remove_all_from_array = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// The name of the document to transform.
|
||||
string document = 1;
|
||||
|
||||
// The list of transformations to apply to the fields of the document, in
|
||||
// order.
|
||||
// This must not be empty.
|
||||
repeated FieldTransform field_transforms = 2;
|
||||
}
|
||||
|
||||
// The result of applying a write.
|
||||
message WriteResult {
|
||||
// The last update time of the document after applying the write. Not set
|
||||
// after a `delete`.
|
||||
//
|
||||
// If the write did not actually change the document, this will be the
|
||||
// previous update_time.
|
||||
google.protobuf.Timestamp update_time = 1;
|
||||
|
||||
// The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1beta1.DocumentTransform.FieldTransform], in the
|
||||
// same order.
|
||||
repeated Value transform_results = 2;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has changed.
|
||||
//
|
||||
// May be the result of multiple [writes][google.firestore.v1beta1.Write], including deletes, that
|
||||
// ultimately resulted in a new value for the [Document][google.firestore.v1beta1.Document].
|
||||
//
|
||||
// Multiple [DocumentChange][google.firestore.v1beta1.DocumentChange] messages may be returned for the same logical
|
||||
// change, if multiple targets are affected.
|
||||
message DocumentChange {
|
||||
// The new state of the [Document][google.firestore.v1beta1.Document].
|
||||
//
|
||||
// If `mask` is set, contains only fields that were updated or added.
|
||||
Document document = 1;
|
||||
|
||||
// A set of target IDs of targets that match this document.
|
||||
repeated int32 target_ids = 5;
|
||||
|
||||
// A set of target IDs for targets that no longer match this document.
|
||||
repeated int32 removed_target_ids = 6;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has been deleted.
|
||||
//
|
||||
// May be the result of multiple [writes][google.firestore.v1beta1.Write], including updates, the
|
||||
// last of which deleted the [Document][google.firestore.v1beta1.Document].
|
||||
//
|
||||
// Multiple [DocumentDelete][google.firestore.v1beta1.DocumentDelete] messages may be returned for the same logical
|
||||
// delete, if multiple targets are affected.
|
||||
message DocumentDelete {
|
||||
// The resource name of the [Document][google.firestore.v1beta1.Document] that was deleted.
|
||||
string document = 1;
|
||||
|
||||
// A set of target IDs for targets that previously matched this entity.
|
||||
repeated int32 removed_target_ids = 6;
|
||||
|
||||
// The read timestamp at which the delete was observed.
|
||||
//
|
||||
// Greater than or equal to the `commit_time` of the delete.
|
||||
google.protobuf.Timestamp read_time = 4;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1beta1.Document] has been removed from the view of the targets.
|
||||
//
|
||||
// Sent if the document is no longer relevant to a target and is out of view.
|
||||
// Can be sent instead of a DocumentDelete or a DocumentChange if the server
|
||||
// cannot send the new value of the document.
|
||||
//
|
||||
// Multiple [DocumentRemove][google.firestore.v1beta1.DocumentRemove] messages may be returned for the same logical
|
||||
// write or delete, if multiple targets are affected.
|
||||
message DocumentRemove {
|
||||
// The resource name of the [Document][google.firestore.v1beta1.Document] that has gone out of view.
|
||||
string document = 1;
|
||||
|
||||
// A set of target IDs for targets that previously matched this document.
|
||||
repeated int32 removed_target_ids = 2;
|
||||
|
||||
// The read timestamp at which the remove was observed.
|
||||
//
|
||||
// Greater than or equal to the `commit_time` of the change/delete/remove.
|
||||
google.protobuf.Timestamp read_time = 4;
|
||||
}
|
||||
|
||||
// A digest of all the documents that match a given target.
|
||||
message ExistenceFilter {
|
||||
// The target ID to which this filter applies.
|
||||
int32 target_id = 1;
|
||||
|
||||
// The total count of documents that match [target_id][google.firestore.v1beta1.ExistenceFilter.target_id].
|
||||
//
|
||||
// If different from the count of documents in the client that match, the
|
||||
// client must manually determine which documents no longer match the target.
|
||||
int32 count = 2;
|
||||
}
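// Illustrative only (not part of the upstream proto): a hedged sketch of how a
// listening client might react to an ExistenceFilter. `local_matches` (a map of
// target ID to locally cached document names) and `reset_target` are
// hypothetical pieces of client state.
//
//     def handle_existence_filter(existence_filter, local_matches, reset_target):
//         # When the server-side count disagrees with the local cache, the client
//         # cannot tell which documents dropped out of the target and must
//         # re-listen (or re-query) to resynchronize it.
//         if existence_filter.count != len(local_matches[existence_filter.target_id]):
//             reset_target(existence_filter.target_id)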
|
||||
246
server/node_modules/@google-cloud/firestore/build/protos/google/longrunning/operations.proto
generated
vendored
Normal file
@@ -0,0 +1,246 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.longrunning;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/api/client.proto";
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/protobuf/descriptor.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/rpc/status.proto";
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option csharp_namespace = "Google.LongRunning";
|
||||
option go_package = "cloud.google.com/go/longrunning/autogen/longrunningpb;longrunningpb";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "OperationsProto";
|
||||
option java_package = "com.google.longrunning";
|
||||
option objc_class_prefix = "GLRUN";
|
||||
option php_namespace = "Google\\LongRunning";
|
||||
|
||||
extend google.protobuf.MethodOptions {
|
||||
// Additional information regarding long-running operations.
|
||||
// In particular, this specifies the types that are returned from
|
||||
// long-running operations.
|
||||
//
|
||||
// Required for methods that return `google.longrunning.Operation`; invalid
|
||||
// otherwise.
|
||||
google.longrunning.OperationInfo operation_info = 1049;
|
||||
}
|
||||
|
||||
// Manages long-running operations with an API service.
|
||||
//
|
||||
// When an API method normally takes a long time to complete, it can be designed
|
||||
// to return [Operation][google.longrunning.Operation] to the client, and the
|
||||
// client can use this interface to receive the real response asynchronously by
|
||||
// polling the operation resource, or pass the operation resource to another API
|
||||
// (such as Pub/Sub API) to receive the response. Any API service that returns
|
||||
// long-running operations should implement the `Operations` interface so
|
||||
// developers can have a consistent client experience.
|
||||
service Operations {
|
||||
option (google.api.default_host) = "longrunning.googleapis.com";
|
||||
|
||||
// Lists operations that match the specified filter in the request. If the
|
||||
// server doesn't support this method, it returns `UNIMPLEMENTED`.
|
||||
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/v1/{name=operations}"
|
||||
};
|
||||
option (google.api.method_signature) = "name,filter";
|
||||
}
|
||||
|
||||
// Gets the latest state of a long-running operation. Clients can use this
|
||||
// method to poll the operation result at intervals as recommended by the API
|
||||
// service.
|
||||
rpc GetOperation(GetOperationRequest) returns (Operation) {
|
||||
option (google.api.http) = {
|
||||
get: "/v1/{name=operations/**}"
|
||||
};
|
||||
option (google.api.method_signature) = "name";
|
||||
}
|
||||
|
||||
// Deletes a long-running operation. This method indicates that the client is
|
||||
// no longer interested in the operation result. It does not cancel the
|
||||
// operation. If the server doesn't support this method, it returns
|
||||
// `google.rpc.Code.UNIMPLEMENTED`.
|
||||
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
delete: "/v1/{name=operations/**}"
|
||||
};
|
||||
option (google.api.method_signature) = "name";
|
||||
}
|
||||
|
||||
// Starts asynchronous cancellation on a long-running operation. The server
|
||||
// makes a best effort to cancel the operation, but success is not
|
||||
// guaranteed. If the server doesn't support this method, it returns
|
||||
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
|
||||
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
|
||||
// other methods to check whether the cancellation succeeded or whether the
|
||||
// operation completed despite cancellation. On successful cancellation,
|
||||
// the operation is not deleted; instead, it becomes an operation with
|
||||
// an [Operation.error][google.longrunning.Operation.error] value with a
|
||||
// [google.rpc.Status.code][google.rpc.Status.code] of `1`, corresponding to
|
||||
// `Code.CANCELLED`.
|
||||
rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{name=operations/**}:cancel"
|
||||
body: "*"
|
||||
};
|
||||
option (google.api.method_signature) = "name";
|
||||
}
|
||||
|
||||
// Waits until the specified long-running operation is done or until at most
|
||||
// the specified timeout has elapsed, returning the latest state. If the operation is
|
||||
// already done, the latest state is immediately returned. If the timeout
|
||||
// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
|
||||
// timeout is used. If the server does not support this method, it returns
|
||||
// `google.rpc.Code.UNIMPLEMENTED`.
|
||||
// Note that this method is on a best-effort basis. It may return the latest
|
||||
// state before the specified timeout (including immediately), meaning even an
|
||||
// immediate response is no guarantee that the operation is done.
|
||||
rpc WaitOperation(WaitOperationRequest) returns (Operation) {}
|
||||
}
|
||||
|
||||
// This resource represents a long-running operation that is the result of a
|
||||
// network API call.
|
||||
message Operation {
|
||||
// The server-assigned name, which is only unique within the same service that
|
||||
// originally returns it. If you use the default HTTP mapping, the
|
||||
// `name` should be a resource name ending with `operations/{unique_id}`.
|
||||
string name = 1;
|
||||
|
||||
// Service-specific metadata associated with the operation. It typically
|
||||
// contains progress information and common metadata such as create time.
|
||||
// Some services might not provide such metadata. Any method that returns a
|
||||
// long-running operation should document the metadata type, if any.
|
||||
google.protobuf.Any metadata = 2;
|
||||
|
||||
// If the value is `false`, it means the operation is still in progress.
|
||||
// If `true`, the operation is completed, and either `error` or `response` is
|
||||
// available.
|
||||
bool done = 3;
|
||||
|
||||
// The operation result, which can be either an `error` or a valid `response`.
|
||||
// If `done` == `false`, neither `error` nor `response` is set.
|
||||
// If `done` == `true`, exactly one of `error` or `response` can be set.
|
||||
// Some services might not provide the result.
|
||||
oneof result {
|
||||
// The error result of the operation in case of failure or cancellation.
|
||||
google.rpc.Status error = 4;
|
||||
|
||||
// The normal, successful response of the operation. If the original
|
||||
// method returns no data on success, such as `Delete`, the response is
|
||||
// `google.protobuf.Empty`. If the original method is standard
|
||||
// `Get`/`Create`/`Update`, the response should be the resource. For other
|
||||
// methods, the response should have the type `XxxResponse`, where `Xxx`
|
||||
// is the original method name. For example, if the original method name
|
||||
// is `TakeSnapshot()`, the inferred response type is
|
||||
// `TakeSnapshotResponse`.
|
||||
google.protobuf.Any response = 5;
|
||||
}
|
||||
}
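// Illustrative only (not part of the upstream proto): a minimal sketch of
// consuming a completed Operation with the Python protobuf runtime. `op` is an
// already-fetched Operation; `response_cls` is the documented response message
// class for the RPC (see OperationInfo.response_type below) and is assumed here.
//
//     from google.longrunning import operations_pb2
//
//     def handle(op: operations_pb2.Operation, response_cls):
//         if not op.done:
//             return None  # still running; poll GetOperation again later
//         if op.WhichOneof("result") == "error":
//             raise RuntimeError(f"{op.error.code}: {op.error.message}")
//         resp = response_cls()
//         op.response.Unpack(resp)  # google.protobuf.Any -> concrete response
//         return resp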
|
||||
|
||||
// The request message for
|
||||
// [Operations.GetOperation][google.longrunning.Operations.GetOperation].
|
||||
message GetOperationRequest {
|
||||
// The name of the operation resource.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// The request message for
|
||||
// [Operations.ListOperations][google.longrunning.Operations.ListOperations].
|
||||
message ListOperationsRequest {
|
||||
// The name of the operation's parent resource.
|
||||
string name = 4;
|
||||
|
||||
// The standard list filter.
|
||||
string filter = 1;
|
||||
|
||||
// The standard list page size.
|
||||
int32 page_size = 2;
|
||||
|
||||
// The standard list page token.
|
||||
string page_token = 3;
|
||||
}
|
||||
|
||||
// The response message for
|
||||
// [Operations.ListOperations][google.longrunning.Operations.ListOperations].
|
||||
message ListOperationsResponse {
|
||||
// A list of operations that matches the specified filter in the request.
|
||||
repeated Operation operations = 1;
|
||||
|
||||
// The standard List next-page token.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request message for
|
||||
// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
|
||||
message CancelOperationRequest {
|
||||
// The name of the operation resource to be cancelled.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// The request message for
|
||||
// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
|
||||
message DeleteOperationRequest {
|
||||
// The name of the operation resource to be deleted.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// The request message for
|
||||
// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation].
|
||||
message WaitOperationRequest {
|
||||
// The name of the operation resource to wait on.
|
||||
string name = 1;
|
||||
|
||||
// The maximum duration to wait before timing out. If left blank, the wait
|
||||
// will be at most the time permitted by the underlying HTTP/RPC protocol.
|
||||
// If RPC context deadline is also specified, the shorter one will be used.
|
||||
google.protobuf.Duration timeout = 2;
|
||||
}
|
||||
|
||||
// A message representing the message types used by a long-running operation.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// rpc Export(ExportRequest) returns (google.longrunning.Operation) {
|
||||
// option (google.longrunning.operation_info) = {
|
||||
// response_type: "ExportResponse"
|
||||
// metadata_type: "ExportMetadata"
|
||||
// };
|
||||
// }
|
||||
message OperationInfo {
|
||||
// Required. The message name of the primary return type for this
|
||||
// long-running operation.
|
||||
// This type will be used to deserialize the LRO's response.
|
||||
//
|
||||
// If the response is in a different package from the rpc, a fully-qualified
|
||||
// message name must be used (e.g. `google.protobuf.Struct`).
|
||||
//
|
||||
// Note: Altering this value constitutes a breaking change.
|
||||
string response_type = 1;
|
||||
|
||||
// Required. The message name of the metadata type for this long-running
|
||||
// operation.
|
||||
//
|
||||
// If the response is in a different package from the rpc, a fully-qualified
|
||||
// message name must be used (e.g. `google.protobuf.Struct`).
|
||||
//
|
||||
// Note: Altering this value constitutes a breaking change.
|
||||
string metadata_type = 2;
|
||||
}
|
||||
162
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/any.proto
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option go_package = "google.golang.org/protobuf/types/known/anypb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
// // or ...
|
||||
// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
|
||||
// foo = any.unpack(Foo.getDefaultInstance());
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := anypb.New(foo)
|
||||
// if err != nil {
|
||||
// ...
|
||||
// }
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := any.UnmarshalTo(foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com. As of May 2023, there are no widely used type server
|
||||
// implementations and no plans to implement one.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
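// Illustrative only (not part of the upstream proto): the JSON mapping described
// above, exercised with the Python protobuf runtime and the well-known Duration
// type.
//
//     from google.protobuf import any_pb2, duration_pb2, json_format
//
//     any_msg = any_pb2.Any()
//     any_msg.Pack(duration_pb2.Duration(seconds=1, nanos=212000000))
//     print(json_format.MessageToJson(any_msg))
//     # {"@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s"}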
|
||||
1219
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/descriptor.proto
generated
vendored
Normal file
File diff suppressed because it is too large
115
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/duration.proto
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/protobuf/types/known/durationpb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
message Duration {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
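// Illustrative only (not part of the upstream proto): the JSON mapping above,
// exercised with the Duration helpers in the Python protobuf runtime.
//
//     import datetime
//     from google.protobuf import duration_pb2
//
//     d = duration_pb2.Duration()
//     d.FromTimedelta(datetime.timedelta(seconds=3, microseconds=1))
//     print(d.ToJsonString())     # "3.000001s"
//     d.FromJsonString("3.000000001s")
//     print(d.seconds, d.nanos)   # 3 1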
|
||||
51
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/empty.proto
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option go_package = "google.golang.org/protobuf/types/known/emptypb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "EmptyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
message Empty {}
|
||||
245
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/field_mask.proto
generated
vendored
Normal file
@@ -0,0 +1,245 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "FieldMaskProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "google.golang.org/protobuf/types/known/fieldmaskpb";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x, y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// paths string.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, new values will
|
||||
// be appended to the existing repeated field in the target resource. Note that
|
||||
// a repeated field is only allowed in the last position of a `paths` string.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then the new value will be merged into the existing sub-message
|
||||
// in the target resource.
|
||||
//
|
||||
// For example, given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 1
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1]
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// }
|
||||
// c: [2]
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: ["f.b", "f.c"]
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1, 2]
|
||||
// }
|
||||
//
|
||||
// An implementation may provide options to override this default behavior for
|
||||
// repeated and message fields.
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Field names in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
//
|
||||
// ## Field Mask Verification
|
||||
//
|
||||
// The implementation of any API method which has a FieldMask type field in the
|
||||
// request should verify the included field paths, and return an
|
||||
// `INVALID_ARGUMENT` error if any path is unmappable.
|
||||
message FieldMask {
|
||||
// The set of field mask paths.
|
||||
repeated string paths = 1;
|
||||
}
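// Illustrative only (not part of the upstream proto): the JSON encoding of field
// masks described above, via the Python protobuf runtime.
//
//     from google.protobuf import field_mask_pb2
//
//     mask = field_mask_pb2.FieldMask(paths=["user.display_name", "photo"])
//     print(mask.ToJsonString())   # "user.displayName,photo"
//     mask.FromJsonString("user.displayName,photo")
//     print(list(mask.paths))      # ['user.display_name', 'photo']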
|
||||
95
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/struct.proto
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/protobuf/types/known/structpb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "StructProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
|
||||
// which map to dynamically typed values. In some languages, `Struct`
|
||||
// might be supported by a native representation. For example, in
|
||||
// scripting languages like JS, a struct is represented as an
|
||||
// object. The details of that representation are described together
|
||||
// with the proto support for the language.
|
||||
//
|
||||
// The JSON representation for `Struct` is JSON object.
|
||||
message Struct {
|
||||
// Unordered map of dynamically typed values.
|
||||
map<string, Value> fields = 1;
|
||||
}
|
||||
|
||||
// `Value` represents a dynamically typed value which can be either
|
||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||
// list of values. A producer of a value is expected to set one of these
|
||||
// variants. Absence of any variant indicates an error.
|
||||
//
|
||||
// The JSON representation for `Value` is JSON value.
|
||||
message Value {
|
||||
// The kind of value.
|
||||
oneof kind {
|
||||
// Represents a null value.
|
||||
NullValue null_value = 1;
|
||||
// Represents a double value.
|
||||
double number_value = 2;
|
||||
// Represents a string value.
|
||||
string string_value = 3;
|
||||
// Represents a boolean value.
|
||||
bool bool_value = 4;
|
||||
// Represents a structured value.
|
||||
Struct struct_value = 5;
|
||||
// Represents a repeated `Value`.
|
||||
ListValue list_value = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
enum NullValue {
|
||||
// Null value.
|
||||
NULL_VALUE = 0;
|
||||
}
|
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values.
|
||||
//
|
||||
// The JSON representation for `ListValue` is JSON array.
|
||||
message ListValue {
|
||||
// Repeated field of dynamically typed values.
|
||||
repeated Value values = 1;
|
||||
}
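// Illustrative only (not part of the upstream proto): building a Struct from a
// Python dict and round-tripping it through JSON with the protobuf runtime.
//
//     from google.protobuf import struct_pb2, json_format
//
//     s = struct_pb2.Struct()
//     s.update({"name": "example", "count": 3, "tags": ["a", "b"], "extra": None})
//     print(s["name"])                     # example
//     print(json_format.MessageToJson(s))  # a plain JSON object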
|
||||
144
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/timestamp.proto
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/protobuf/types/known/timestamppb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
// Example 5: Compute Timestamp from Java `Instant.now()`.
|
||||
//
|
||||
// Instant now = Instant.now();
|
||||
//
|
||||
// Timestamp timestamp =
|
||||
// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
|
||||
// .setNanos(now.getNano()).build();
|
||||
//
|
||||
// Example 6: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
message Timestamp {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
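// Illustrative only (not part of the upstream proto): the RFC 3339 JSON mapping
// and datetime conversion, using the Timestamp helpers in the Python protobuf
// runtime.
//
//     import datetime
//     from google.protobuf import timestamp_pb2
//
//     ts = timestamp_pb2.Timestamp()
//     ts.FromJsonString("2017-01-15T01:30:15.01Z")
//     print(ts.ToDatetime())      # 2017-01-15 01:30:15.010000 (naive UTC)
//     ts.FromDatetime(datetime.datetime(2017, 1, 15, 1, 30, 15))
//     print(ts.ToJsonString())    # "2017-01-15T01:30:15Z"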
|
||||
123
server/node_modules/@google-cloud/firestore/build/protos/google/protobuf/wrappers.proto
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Wrappers for primitive (non-message) types. These types are useful
|
||||
// for embedding primitives in the `google.protobuf.Any` type and for places
|
||||
// where we need to distinguish between the absence of a primitive
|
||||
// typed field and its default value.
|
||||
//
|
||||
// These wrappers have no meaningful use within repeated fields as they lack
|
||||
// the ability to detect presence on individual elements.
|
||||
// These wrappers have no meaningful use within a map or a oneof since
|
||||
// individual entries of a map or fields of a oneof can already detect presence.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/protobuf/types/known/wrapperspb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "WrappersProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
|
||||
// Wrapper message for `double`.
|
||||
//
|
||||
// The JSON representation for `DoubleValue` is JSON number.
|
||||
message DoubleValue {
|
||||
// The double value.
|
||||
double value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `float`.
|
||||
//
|
||||
// The JSON representation for `FloatValue` is JSON number.
|
||||
message FloatValue {
|
||||
// The float value.
|
||||
float value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int64`.
|
||||
//
|
||||
// The JSON representation for `Int64Value` is JSON string.
|
||||
message Int64Value {
|
||||
// The int64 value.
|
||||
int64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint64`.
|
||||
//
|
||||
// The JSON representation for `UInt64Value` is JSON string.
|
||||
message UInt64Value {
|
||||
// The uint64 value.
|
||||
uint64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int32`.
|
||||
//
|
||||
// The JSON representation for `Int32Value` is JSON number.
|
||||
message Int32Value {
|
||||
// The int32 value.
|
||||
int32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint32`.
|
||||
//
|
||||
// The JSON representation for `UInt32Value` is JSON number.
|
||||
message UInt32Value {
|
||||
// The uint32 value.
|
||||
uint32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bool`.
|
||||
//
|
||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
||||
message BoolValue {
|
||||
// The bool value.
|
||||
bool value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `string`.
|
||||
//
|
||||
// The JSON representation for `StringValue` is JSON string.
|
||||
message StringValue {
|
||||
// The string value.
|
||||
string value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bytes`.
|
||||
//
|
||||
// The JSON representation for `BytesValue` is JSON string.
|
||||
message BytesValue {
|
||||
// The bytes value.
|
||||
bytes value = 1;
|
||||
}
|
||||
49
server/node_modules/@google-cloud/firestore/build/protos/google/rpc/status.proto
generated
vendored
Normal file
49
server/node_modules/@google-cloud/firestore/build/protos/google/rpc/status.proto
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.rpc;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "StatusProto";
|
||||
option java_package = "com.google.rpc";
|
||||
option objc_class_prefix = "RPC";
|
||||
|
||||
// The `Status` type defines a logical error model that is suitable for
|
||||
// different programming environments, including REST APIs and RPC APIs. It is
|
||||
// used by [gRPC](https://github.com/grpc). Each `Status` message contains
|
||||
// three pieces of data: error code, error message, and error details.
|
||||
//
|
||||
// You can find out more about this error model and how to work with it in the
|
||||
// [API Design Guide](https://cloud.google.com/apis/design/errors).
|
||||
message Status {
|
||||
// The status code, which should be an enum value of
|
||||
// [google.rpc.Code][google.rpc.Code].
|
||||
int32 code = 1;
|
||||
|
||||
// A developer-facing error message, which should be in English. Any
|
||||
// user-facing error message should be localized and sent in the
|
||||
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
|
||||
// by the client.
|
||||
string message = 2;
|
||||
|
||||
// A list of messages that carry the error details. There is a common set of
|
||||
// message types for APIs to use.
|
||||
repeated google.protobuf.Any details = 3;
|
||||
}
|
||||
50
server/node_modules/@google-cloud/firestore/build/protos/google/type/dayofweek.proto
generated
vendored
Normal file
50
server/node_modules/@google-cloud/firestore/build/protos/google/type/dayofweek.proto
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.type;
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/type/dayofweek;dayofweek";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "DayOfWeekProto";
|
||||
option java_package = "com.google.type";
|
||||
option objc_class_prefix = "GTP";
|
||||
|
||||
// Represents a day of the week.
|
||||
enum DayOfWeek {
|
||||
// The day of the week is unspecified.
|
||||
DAY_OF_WEEK_UNSPECIFIED = 0;
|
||||
|
||||
// Monday
|
||||
MONDAY = 1;
|
||||
|
||||
// Tuesday
|
||||
TUESDAY = 2;
|
||||
|
||||
// Wednesday
|
||||
WEDNESDAY = 3;
|
||||
|
||||
// Thursday
|
||||
THURSDAY = 4;
|
||||
|
||||
// Friday
|
||||
FRIDAY = 5;
|
||||
|
||||
// Saturday
|
||||
SATURDAY = 6;
|
||||
|
||||
// Sunday
|
||||
SUNDAY = 7;
|
||||
}
|
||||
37
server/node_modules/@google-cloud/firestore/build/protos/google/type/latlng.proto
generated
vendored
Normal file
37
server/node_modules/@google-cloud/firestore/build/protos/google/type/latlng.proto
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.type;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/genproto/googleapis/type/latlng;latlng";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "LatLngProto";
|
||||
option java_package = "com.google.type";
|
||||
option objc_class_prefix = "GTP";
|
||||
|
||||
// An object that represents a latitude/longitude pair. This is expressed as a
|
||||
// pair of doubles to represent degrees latitude and degrees longitude. Unless
|
||||
// specified otherwise, this must conform to the
|
||||
// <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
|
||||
// standard</a>. Values must be within normalized ranges.
|
||||
message LatLng {
|
||||
// The latitude in degrees. It must be in the range [-90.0, +90.0].
|
||||
double latitude = 1;
|
||||
|
||||
// The longitude in degrees. It must be in the range [-180.0, +180.0].
|
||||
double longitude = 2;
|
||||
}
|
||||
144
server/node_modules/@google-cloud/firestore/build/protos/update.sh
generated
vendored
Executable file
144
server/node_modules/@google-cloud/firestore/build/protos/update.sh
generated
vendored
Executable file
@@ -0,0 +1,144 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
echo "Running update.sh"
|
||||
echo $(npm --version)
|
||||
# Variables
|
||||
PROTOS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
WORK_DIR=`mktemp -d`
|
||||
cd ${PROTOS_DIR}
|
||||
|
||||
# deletes the temp directory on exit
|
||||
function cleanup {
|
||||
rm -rf "$WORK_DIR"
|
||||
echo "Deleted temp working directory $WORK_DIR"
|
||||
}
|
||||
|
||||
# register the cleanup function to be called on the EXIT signal
|
||||
trap cleanup EXIT
|
||||
|
||||
# Capture location of pbjs / pbts before we pushd.
|
||||
PBJS="$(npm root)/.bin/pbjs"
|
||||
PBTS="$(npm root)/.bin/pbts"
|
||||
|
||||
# Enter work dir
|
||||
pushd "$WORK_DIR"
|
||||
|
||||
# Clone necessary git repos.
|
||||
git clone --depth 1 https://github.com/googleapis/googleapis.git
|
||||
# Protobuf may have breaking changes, so it will be pinned to a specific release.
|
||||
# TODO(version) nodejs-firestore should maintain the version number of protobuf manually
|
||||
git clone --single-branch --branch v26.1 --depth 1 https://github.com/google/protobuf.git
|
||||
|
||||
# Copy necessary protos.
|
||||
mkdir -p "${PROTOS_DIR}/google/api"
|
||||
cp googleapis/google/api/{annotations,client,field_behavior,http,launch_stage,resource}.proto \
|
||||
"${PROTOS_DIR}/google/api/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/firestore/v1"
|
||||
cp googleapis/google/firestore/v1/*.proto \
|
||||
"${PROTOS_DIR}/google/firestore/v1/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/firestore/v1beta1"
|
||||
cp googleapis/google/firestore/v1beta1/*.proto \
|
||||
"${PROTOS_DIR}/google/firestore/v1beta1/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/firestore/admin/v1"
|
||||
cp googleapis/google/firestore/admin/v1/*.proto \
|
||||
"${PROTOS_DIR}/google/firestore/admin/v1/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/longrunning"
|
||||
cp googleapis/google/longrunning/operations.proto \
|
||||
"${PROTOS_DIR}/google/longrunning/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/rpc"
|
||||
cp googleapis/google/rpc/status.proto \
|
||||
"${PROTOS_DIR}/google/rpc/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/type"
|
||||
cp googleapis/google/type/{latlng,dayofweek}.proto \
|
||||
"${PROTOS_DIR}/google/type/"
|
||||
|
||||
mkdir -p "${PROTOS_DIR}/google/protobuf"
|
||||
cp protobuf/src/google/protobuf/{any,descriptor,empty,field_mask,struct,timestamp,wrappers,duration}.proto \
|
||||
"${PROTOS_DIR}/google/protobuf/"
|
||||
|
||||
popd
|
||||
|
||||
# Generate the Protobuf typings
|
||||
PBJS_ARGS=( -p . \
|
||||
--js_out=import_style=commonjs,binary:library \
|
||||
--target=static-module \
|
||||
--no-create \
|
||||
--no-encode \
|
||||
--no-decode \
|
||||
--no-verify \
|
||||
--no-delimited \
|
||||
--force-enum-string)
|
||||
|
||||
"${PBJS}" "${PBJS_ARGS[@]}" -o firestore_v1_proto_api.js \
|
||||
-r firestore_v1 \
|
||||
"google/firestore/v1/*.proto" \
|
||||
"firestore/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto" \
|
||||
"google/longrunning/*.proto"
|
||||
perl -pi -e 's/number\|Long/number\|string/g' firestore_v1_proto_api.js
|
||||
"${PBTS}" -o firestore_v1_proto_api.d.ts firestore_v1_proto_api.js
|
||||
|
||||
"${PBJS}" "${PBJS_ARGS[@]}" -o firestore_admin_v1_proto_api.js \
|
||||
-r firestore_admin_v1 \
|
||||
"google/firestore/admin/v1/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto" \
|
||||
"google/longrunning/*.proto"
|
||||
perl -pi -e 's/number\|Long/number\|string/g' firestore_admin_v1_proto_api.js
|
||||
"${PBTS}" -o firestore_admin_v1_proto_api.d.ts firestore_admin_v1_proto_api.js
|
||||
|
||||
"${PBJS}" "${PBJS_ARGS[@]}" -o firestore_v1beta1_proto_api.js \
|
||||
-r firestore_v1beta1 \
|
||||
"google/firestore/v1beta1/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto" \
|
||||
"google/longrunning/*.proto"
|
||||
perl -pi -e 's/number\|Long/number\|string/g' firestore_v1beta1_proto_api.js
|
||||
"${PBTS}" -o firestore_v1beta1_proto_api.d.ts firestore_v1beta1_proto_api.js
|
||||
|
||||
"${PBJS}" -p . --target=json -o v1.json \
|
||||
-r firestore_v1 \
|
||||
"google/firestore/v1/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto"
|
||||
|
||||
"${PBJS}" -p . --target=json -o admin_v1.json \
|
||||
-r firestore_admin_v1 \
|
||||
"google/firestore/admin/v1/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto" \
|
||||
"google/longrunning/*.proto"
|
||||
|
||||
"${PBJS}" -p . --target=json -o v1beta1.json \
|
||||
-r firestore_v1beta1 \
|
||||
"google/firestore/v1beta1/*.proto" \
|
||||
"google/protobuf/*.proto" "google/type/*.proto" \
|
||||
"google/rpc/*.proto" "google/api/*.proto"
|
||||
|
||||
echo "Finished running update.sh"
|
||||
|
||||
node ../../scripts/license.js ../../build ../protos
|
||||
1
server/node_modules/@google-cloud/firestore/build/protos/v1.json
generated
vendored
Normal file
1
server/node_modules/@google-cloud/firestore/build/protos/v1.json
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
server/node_modules/@google-cloud/firestore/build/protos/v1beta1.json
generated
vendored
Normal file
1
server/node_modules/@google-cloud/firestore/build/protos/v1beta1.json
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
95
server/node_modules/@google-cloud/firestore/build/src/aggregate.d.ts
generated
vendored
Normal file
95
server/node_modules/@google-cloud/firestore/build/src/aggregate.d.ts
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Copyright 2023 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { FieldPath } from './path';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import IAggregation = google.firestore.v1.StructuredAggregationQuery.IAggregation;
|
||||
/**
|
||||
* Concrete implementation of the Aggregate type.
|
||||
*/
|
||||
export declare class Aggregate {
|
||||
readonly alias: string;
|
||||
readonly aggregateType: AggregateType;
|
||||
readonly fieldPath?: (string | FieldPath) | undefined;
|
||||
constructor(alias: string, aggregateType: AggregateType, fieldPath?: (string | FieldPath) | undefined);
|
||||
/**
|
||||
* Converts this object to the proto representation of an Aggregate.
|
||||
* @internal
|
||||
*/
|
||||
toProto(): IAggregation;
|
||||
}
|
||||
/**
|
||||
* Represents an aggregation that can be performed by Firestore.
|
||||
*/
|
||||
export declare class AggregateField<T> implements firestore.AggregateField<T> {
|
||||
readonly aggregateType: AggregateType;
|
||||
/** A type string to uniquely identify instances of this class. */
|
||||
readonly type = "AggregateField";
|
||||
/**
|
||||
* The field on which the aggregation is performed.
|
||||
* @internal
|
||||
**/
|
||||
readonly _field?: string | FieldPath;
|
||||
/**
|
||||
* Create a new AggregateField<T>
|
||||
* @param aggregateType Specifies the type of aggregation operation to perform.
|
||||
* @param field Optionally specifies the field that is aggregated.
|
||||
* @internal
|
||||
*/
|
||||
private constructor();
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* This object is considered "equal" to the other object if and only if
|
||||
* `other` performs the same kind of aggregation on the same field (if any).
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other: AggregateField<T>): boolean;
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the count of
|
||||
* documents in the result set of a query.
|
||||
*/
|
||||
static count(): AggregateField<number>;
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the average of
|
||||
* a specified field over a range of documents in the result set of a query.
|
||||
* @param field Specifies the field to average across the result set.
|
||||
*/
|
||||
static average(field: string | FieldPath): AggregateField<number | null>;
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the sum of
|
||||
* a specified field over a range of documents in the result set of a query.
|
||||
* @param field Specifies the field to sum across the result set.
|
||||
*/
|
||||
static sum(field: string | FieldPath): AggregateField<number>;
|
||||
}
|
||||
/**
|
||||
* A type whose property values are all `AggregateField` objects.
|
||||
*/
|
||||
export interface AggregateSpec {
|
||||
[field: string]: AggregateFieldType;
|
||||
}
|
||||
/**
|
||||
* The union of all `AggregateField` types that are supported by Firestore.
|
||||
*/
|
||||
export type AggregateFieldType = ReturnType<typeof AggregateField.count> | ReturnType<typeof AggregateField.sum> | ReturnType<typeof AggregateField.average>;
|
||||
/**
|
||||
* Union type representing the aggregate type to be performed.
|
||||
*/
|
||||
export type AggregateType = 'count' | 'avg' | 'sum';
|
||||
122
server/node_modules/@google-cloud/firestore/build/src/aggregate.js
generated
vendored
Normal file
122
server/node_modules/@google-cloud/firestore/build/src/aggregate.js
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Copyright 2023 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AggregateField = exports.Aggregate = void 0;
|
||||
const path_1 = require("./path");
|
||||
const assert = require("assert");
|
||||
/**
|
||||
* Concrete implementation of the Aggregate type.
|
||||
*/
|
||||
class Aggregate {
|
||||
constructor(alias, aggregateType, fieldPath) {
|
||||
this.alias = alias;
|
||||
this.aggregateType = aggregateType;
|
||||
this.fieldPath = fieldPath;
|
||||
}
|
||||
/**
|
||||
* Converts this object to the proto representation of an Aggregate.
|
||||
* @internal
|
||||
*/
|
||||
toProto() {
|
||||
const proto = {};
|
||||
if (this.aggregateType === 'count') {
|
||||
proto.count = {};
|
||||
}
|
||||
else if (this.aggregateType === 'sum') {
|
||||
assert(this.fieldPath !== undefined, 'Missing field path for sum aggregation.');
|
||||
proto.sum = {
|
||||
field: {
|
||||
fieldPath: path_1.FieldPath.fromArgument(this.fieldPath).formattedName,
|
||||
},
|
||||
};
|
||||
}
|
||||
else if (this.aggregateType === 'avg') {
|
||||
assert(this.fieldPath !== undefined, 'Missing field path for average aggregation.');
|
||||
proto.avg = {
|
||||
field: {
|
||||
fieldPath: path_1.FieldPath.fromArgument(this.fieldPath).formattedName,
|
||||
},
|
||||
};
|
||||
}
|
||||
else {
|
||||
throw new Error(`Aggregate type ${this.aggregateType} unimplemented.`);
|
||||
}
|
||||
proto.alias = this.alias;
|
||||
return proto;
|
||||
}
|
||||
}
|
||||
exports.Aggregate = Aggregate;
|
||||
/**
|
||||
* Represents an aggregation that can be performed by Firestore.
|
||||
*/
|
||||
class AggregateField {
|
||||
/**
|
||||
* Create a new AggregateField<T>
|
||||
* @param aggregateType Specifies the type of aggregation operation to perform.
|
||||
* @param field Optionally specifies the field that is aggregated.
|
||||
* @internal
|
||||
*/
|
||||
constructor(aggregateType, field) {
|
||||
this.aggregateType = aggregateType;
|
||||
/** A type string to uniquely identify instances of this class. */
|
||||
this.type = 'AggregateField';
|
||||
this._field = field;
|
||||
}
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* This object is considered "equal" to the other object if and only if
|
||||
* `other` performs the same kind of aggregation on the same field (if any).
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other) {
|
||||
return (other instanceof AggregateField &&
|
||||
this.aggregateType === other.aggregateType &&
|
||||
((this._field === undefined && other._field === undefined) ||
|
||||
(this._field !== undefined &&
|
||||
other._field !== undefined &&
|
||||
path_1.FieldPath.fromArgument(this._field).isEqual(path_1.FieldPath.fromArgument(other._field)))));
|
||||
}
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the count of
|
||||
* documents in the result set of a query.
|
||||
*/
|
||||
static count() {
|
||||
return new AggregateField('count');
|
||||
}
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the average of
|
||||
* a specified field over a range of documents in the result set of a query.
|
||||
* @param field Specifies the field to average across the result set.
|
||||
*/
|
||||
static average(field) {
|
||||
return new AggregateField('avg', field);
|
||||
}
|
||||
/**
|
||||
* Create an AggregateField object that can be used to compute the sum of
|
||||
* a specified field over a range of documents in the result set of a query.
|
||||
* @param field Specifies the field to sum across the result set.
|
||||
*/
|
||||
static sum(field) {
|
||||
return new AggregateField('sum', field);
|
||||
}
|
||||
}
|
||||
exports.AggregateField = AggregateField;
|
||||
//# sourceMappingURL=aggregate.js.map
|
||||
177
server/node_modules/@google-cloud/firestore/build/src/backoff.d.ts
generated
vendored
Normal file
177
server/node_modules/@google-cloud/firestore/build/src/backoff.d.ts
generated
vendored
Normal file
@@ -0,0 +1,177 @@
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*!
|
||||
* The default initial backoff time in milliseconds after an error.
|
||||
* Set to 1s according to https://cloud.google.com/apis/design/errors.
|
||||
*/
|
||||
export declare const DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
|
||||
/*!
|
||||
* The default maximum backoff time in milliseconds.
|
||||
*/
|
||||
export declare const DEFAULT_BACKOFF_MAX_DELAY_MS: number;
|
||||
/*!
|
||||
* The default factor to increase the backup by after each failed attempt.
|
||||
*/
|
||||
export declare const DEFAULT_BACKOFF_FACTOR = 1.5;
|
||||
/*!
|
||||
* The maximum number of retries that will be attempted by backoff
|
||||
* before stopping all retry attempts.
|
||||
*/
|
||||
export declare const MAX_RETRY_ATTEMPTS = 10;
|
||||
/*!
|
||||
* The timeout handler used by `ExponentialBackoff` and `BulkWriter`.
|
||||
*/
|
||||
export declare let delayExecution: (f: () => void, ms: number) => NodeJS.Timeout;
|
||||
/**
|
||||
* Allows overriding of the timeout handler used by the exponential backoff
|
||||
* implementation. If not invoked, we default to `setTimeout()`.
|
||||
*
|
||||
* Used only in testing.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param {function} handler A handler than matches the API of `setTimeout()`.
|
||||
*/
|
||||
export declare function setTimeoutHandler(handler: (f: () => void, ms: number) => void): void;
|
||||
/**
|
||||
* Configuration object to adjust the delays of the exponential backoff
|
||||
* algorithm.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export interface ExponentialBackoffSetting {
|
||||
/** Optional override for the initial retry delay. */
|
||||
initialDelayMs?: number;
|
||||
/** Optional override for the exponential backoff factor. */
|
||||
backoffFactor?: number;
|
||||
/** Optional override for the maximum retry delay. */
|
||||
maxDelayMs?: number;
|
||||
/**
|
||||
* Optional override to control the itter factor by which to randomize
|
||||
* attempts (0 means no randomization, 1.0 means +/-50% randomization). It is
|
||||
* suggested not to exceed this range.
|
||||
*/
|
||||
jitterFactor?: number;
|
||||
}
|
||||
/**
|
||||
* A helper for running delayed tasks following an exponential backoff curve
|
||||
* between attempts.
|
||||
*
|
||||
* Each delay is made up of a "base" delay which follows the exponential
|
||||
* backoff curve, and a "jitter" (+/- 50% by default) that is calculated and
|
||||
* added to the base delay. This prevents clients from accidentally
|
||||
* synchronizing their delays causing spikes of load to the backend.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class ExponentialBackoff {
|
||||
/**
|
||||
* The initial delay (used as the base delay on the first retry attempt).
|
||||
* Note that jitter will still be applied, so the actual delay could be as
|
||||
* little as 0.5*initialDelayMs (based on a jitter factor of 1.0).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly initialDelayMs;
|
||||
/**
|
||||
* The multiplier to use to determine the extended base delay after each
|
||||
* attempt.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly backoffFactor;
|
||||
/**
|
||||
* The maximum base delay after which no further backoff is performed.
|
||||
* Note that jitter will still be applied, so the actual delay could be as
|
||||
* much as 1.5*maxDelayMs (based on a jitter factor of 1.0).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly maxDelayMs;
|
||||
/**
|
||||
* The jitter factor that controls the random distribution of the backoff
|
||||
* points.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly jitterFactor;
|
||||
/**
|
||||
* The number of retries that has been attempted.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _retryCount;
|
||||
/**
|
||||
* The backoff delay of the current attempt.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private currentBaseMs;
|
||||
/**
|
||||
* Whether we are currently waiting for backoff to complete.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private awaitingBackoffCompletion;
|
||||
constructor(options?: ExponentialBackoffSetting);
|
||||
/**
|
||||
* Resets the backoff delay and retry count.
|
||||
*
|
||||
* The very next backoffAndWait() will have no delay. If it is called again
|
||||
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
|
||||
* subsequent ones will increase according to the backoffFactor.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
reset(): void;
|
||||
/**
|
||||
* Resets the backoff delay to the maximum delay (e.g. for use after a
|
||||
* RESOURCE_EXHAUSTED error).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
resetToMax(): void;
|
||||
/**
|
||||
* Returns a promise that resolves after currentDelayMs, and increases the
|
||||
* delay for any subsequent attempts.
|
||||
*
|
||||
* @return A Promise that resolves when the current delay elapsed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
backoffAndWait(): Promise<void>;
|
||||
get retryCount(): number;
|
||||
/**
|
||||
* Returns a randomized "jitter" delay based on the current base and jitter
|
||||
* factor.
|
||||
*
|
||||
* @returns {number} The jitter to apply based on the current delay.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private jitterDelayMs;
|
||||
}
|
||||
225
server/node_modules/@google-cloud/firestore/build/src/backoff.js
generated
vendored
Normal file
225
server/node_modules/@google-cloud/firestore/build/src/backoff.js
generated
vendored
Normal file
@@ -0,0 +1,225 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ExponentialBackoff = exports.delayExecution = exports.MAX_RETRY_ATTEMPTS = exports.DEFAULT_BACKOFF_FACTOR = exports.DEFAULT_BACKOFF_MAX_DELAY_MS = exports.DEFAULT_BACKOFF_INITIAL_DELAY_MS = void 0;
|
||||
exports.setTimeoutHandler = setTimeoutHandler;
|
||||
const logger_1 = require("./logger");
|
||||
/*
|
||||
* @module firestore/backoff
|
||||
* @private
|
||||
* @internal
|
||||
*
|
||||
* Contains backoff logic to facilitate RPC error handling. This class derives
|
||||
* its implementation from the Firestore Mobile Web Client.
|
||||
*
|
||||
* @see https://github.com/firebase/firebase-js-sdk/blob/master/packages/firestore/src/remote/backoff.ts
|
||||
*/
|
||||
/*!
|
||||
* The default initial backoff time in milliseconds after an error.
|
||||
* Set to 1s according to https://cloud.google.com/apis/design/errors.
|
||||
*/
|
||||
exports.DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
|
||||
/*!
|
||||
* The default maximum backoff time in milliseconds.
|
||||
*/
|
||||
exports.DEFAULT_BACKOFF_MAX_DELAY_MS = 60 * 1000;
|
||||
/*!
|
||||
* The default factor to increase the backup by after each failed attempt.
|
||||
*/
|
||||
exports.DEFAULT_BACKOFF_FACTOR = 1.5;
|
||||
/*!
|
||||
* The default jitter to distribute the backoff attempts by (0 means no
|
||||
* randomization, 1.0 means +/-50% randomization).
|
||||
*/
|
||||
const DEFAULT_JITTER_FACTOR = 1.0;
|
||||
/*!
|
||||
* The maximum number of retries that will be attempted by backoff
|
||||
* before stopping all retry attempts.
|
||||
*/
|
||||
exports.MAX_RETRY_ATTEMPTS = 10;
|
||||
/*!
|
||||
* The timeout handler used by `ExponentialBackoff` and `BulkWriter`.
|
||||
*/
|
||||
exports.delayExecution = setTimeout;
|
||||
/**
|
||||
* Allows overriding of the timeout handler used by the exponential backoff
|
||||
* implementation. If not invoked, we default to `setTimeout()`.
|
||||
*
|
||||
* Used only in testing.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param {function} handler A handler than matches the API of `setTimeout()`.
|
||||
*/
|
||||
function setTimeoutHandler(handler) {
|
||||
exports.delayExecution = (f, ms) => {
|
||||
handler(f, ms);
|
||||
const timeout = {
|
||||
hasRef: () => {
|
||||
throw new Error('For tests only. Not Implemented');
|
||||
},
|
||||
ref: () => {
|
||||
throw new Error('For tests only. Not Implemented');
|
||||
},
|
||||
refresh: () => {
|
||||
throw new Error('For tests only. Not Implemented');
|
||||
},
|
||||
unref: () => {
|
||||
throw new Error('For tests only. Not Implemented');
|
||||
},
|
||||
[Symbol.toPrimitive]: () => {
|
||||
throw new Error('For tests only. Not Implemented');
|
||||
},
|
||||
};
|
||||
// `NodeJS.Timeout` type signature change:
|
||||
// https://github.com/DefinitelyTyped/DefinitelyTyped/pull/66176/files#diff-e838d0ace9cd5f6516bacfbd3ad00d02cd37bd60f9993ce6223f52d889a1fdbaR122-R126
|
||||
//
|
||||
// Adding `[Symbol.dispose](): void;` cannot be done on older versions of
|
||||
// NodeJS. So we simply cast to `NodeJS.Timeout`.
|
||||
return timeout;
|
||||
};
|
||||
}
|
||||
/**
|
||||
* A helper for running delayed tasks following an exponential backoff curve
|
||||
* between attempts.
|
||||
*
|
||||
* Each delay is made up of a "base" delay which follows the exponential
|
||||
* backoff curve, and a "jitter" (+/- 50% by default) that is calculated and
|
||||
* added to the base delay. This prevents clients from accidentally
|
||||
* synchronizing their delays causing spikes of load to the backend.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ExponentialBackoff {
|
||||
constructor(options = {}) {
|
||||
/**
|
||||
* The number of retries that has been attempted.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._retryCount = 0;
|
||||
/**
|
||||
* The backoff delay of the current attempt.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.currentBaseMs = 0;
|
||||
/**
|
||||
* Whether we are currently waiting for backoff to complete.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.awaitingBackoffCompletion = false;
|
||||
this.initialDelayMs =
|
||||
options.initialDelayMs !== undefined
|
||||
? options.initialDelayMs
|
||||
: exports.DEFAULT_BACKOFF_INITIAL_DELAY_MS;
|
||||
this.backoffFactor =
|
||||
options.backoffFactor !== undefined
|
||||
? options.backoffFactor
|
||||
: exports.DEFAULT_BACKOFF_FACTOR;
|
||||
this.maxDelayMs =
|
||||
options.maxDelayMs !== undefined
|
||||
? options.maxDelayMs
|
||||
: exports.DEFAULT_BACKOFF_MAX_DELAY_MS;
|
||||
this.jitterFactor =
|
||||
options.jitterFactor !== undefined
|
||||
? options.jitterFactor
|
||||
: DEFAULT_JITTER_FACTOR;
|
||||
}
|
||||
/**
|
||||
* Resets the backoff delay and retry count.
|
||||
*
|
||||
* The very next backoffAndWait() will have no delay. If it is called again
|
||||
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
|
||||
* subsequent ones will increase according to the backoffFactor.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
reset() {
|
||||
this._retryCount = 0;
|
||||
this.currentBaseMs = 0;
|
||||
}
|
||||
/**
|
||||
* Resets the backoff delay to the maximum delay (e.g. for use after a
|
||||
* RESOURCE_EXHAUSTED error).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
resetToMax() {
|
||||
this.currentBaseMs = this.maxDelayMs;
|
||||
}
|
||||
/**
|
||||
* Returns a promise that resolves after currentDelayMs, and increases the
|
||||
* delay for any subsequent attempts.
|
||||
*
|
||||
* @return A Promise that resolves when the current delay elapsed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
backoffAndWait() {
|
||||
if (this.awaitingBackoffCompletion) {
|
||||
return Promise.reject(new Error('A backoff operation is already in progress.'));
|
||||
}
|
||||
if (this.retryCount > exports.MAX_RETRY_ATTEMPTS) {
|
||||
return Promise.reject(new Error('Exceeded maximum number of retries allowed.'));
|
||||
}
|
||||
// First schedule using the current base (which may be 0 and should be
|
||||
// honored as such).
|
||||
const delayWithJitterMs = this.currentBaseMs + this.jitterDelayMs();
|
||||
if (this.currentBaseMs > 0) {
|
||||
(0, logger_1.logger)('ExponentialBackoff.backoffAndWait', null, `Backing off for ${delayWithJitterMs} ms ` +
|
||||
`(base delay: ${this.currentBaseMs} ms)`);
|
||||
}
|
||||
// Apply backoff factor to determine next delay and ensure it is within
|
||||
// bounds.
|
||||
this.currentBaseMs *= this.backoffFactor;
|
||||
this.currentBaseMs = Math.max(this.currentBaseMs, this.initialDelayMs);
|
||||
this.currentBaseMs = Math.min(this.currentBaseMs, this.maxDelayMs);
|
||||
this._retryCount += 1;
|
||||
return new Promise(resolve => {
|
||||
this.awaitingBackoffCompletion = true;
|
||||
(0, exports.delayExecution)(() => {
|
||||
this.awaitingBackoffCompletion = false;
|
||||
resolve();
|
||||
}, delayWithJitterMs);
|
||||
});
|
||||
}
|
||||
// Visible for testing.
|
||||
get retryCount() {
|
||||
return this._retryCount;
|
||||
}
|
||||
/**
|
||||
* Returns a randomized "jitter" delay based on the current base and jitter
|
||||
* factor.
|
||||
*
|
||||
* @returns {number} The jitter to apply based on the current delay.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
jitterDelayMs() {
|
||||
return (Math.random() - 0.5) * this.jitterFactor * this.currentBaseMs;
|
||||
}
|
||||
}
|
||||
exports.ExponentialBackoff = ExponentialBackoff;
|
||||
//# sourceMappingURL=backoff.js.map
|
||||
529
server/node_modules/@google-cloud/firestore/build/src/bulk-writer.d.ts
generated
vendored
Normal file
529
server/node_modules/@google-cloud/firestore/build/src/bulk-writer.d.ts
generated
vendored
Normal file
@@ -0,0 +1,529 @@
|
||||
/*!
|
||||
* Copyright 2020 Google LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import type { GoogleError } from 'google-gax';
|
||||
import { FieldPath, Firestore } from '.';
|
||||
import { RateLimiter } from './rate-limiter';
|
||||
import { Timestamp } from './timestamp';
|
||||
import { WriteBatch, WriteResult } from './write-batch';
|
||||
import GrpcStatus = FirebaseFirestore.GrpcStatus;
|
||||
/*!
|
||||
* The maximum number of writes can be can in a single batch that is being retried.
|
||||
*/
|
||||
export declare const RETRY_MAX_BATCH_SIZE = 10;
|
||||
/*!
|
||||
* The starting maximum number of operations per second as allowed by the
|
||||
* 500/50/5 rule.
|
||||
*
|
||||
* https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic.
|
||||
*/
|
||||
export declare const DEFAULT_INITIAL_OPS_PER_SECOND_LIMIT = 500;
|
||||
/*!
|
||||
* The maximum number of operations per second as allowed by the 500/50/5 rule.
|
||||
* By default the rate limiter will not exceed this value.
|
||||
*
|
||||
* https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic.
|
||||
*/
|
||||
export declare const DEFAULT_MAXIMUM_OPS_PER_SECOND_LIMIT = 10000;
|
||||
/*!
|
||||
* The default jitter to apply to the exponential backoff used in retries. For
|
||||
* example, a factor of 0.3 means a 30% jitter is applied.
|
||||
*/
|
||||
export declare const DEFAULT_JITTER_FACTOR = 0.3;
|
||||
/**
|
||||
* Represents a single write for BulkWriter, encapsulating operation dispatch
|
||||
* and error handling.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
declare class BulkWriterOperation {
|
||||
readonly ref: firestore.DocumentReference<unknown>;
|
||||
private readonly type;
|
||||
private readonly sendFn;
|
||||
private readonly errorFn;
|
||||
private readonly successFn;
|
||||
private deferred;
|
||||
private failedAttempts;
|
||||
private lastStatus?;
|
||||
private _backoffDuration;
|
||||
/** Whether flush() was called when this was the last enqueued operation. */
|
||||
private _flushed;
|
||||
/**
|
||||
* @param ref The document reference being written to.
|
||||
* @param type The type of operation that created this write.
|
||||
* @param sendFn A callback to invoke when the operation should be sent.
|
||||
* @param errorFn The user provided global error callback.
|
||||
* @param successFn The user provided global success callback.
|
||||
*/
|
||||
constructor(ref: firestore.DocumentReference<unknown>, type: 'create' | 'set' | 'update' | 'delete', sendFn: (op: BulkWriterOperation) => void, errorFn: (error: BulkWriterError) => boolean, successFn: (ref: firestore.DocumentReference<unknown>, result: WriteResult) => void);
|
||||
get promise(): Promise<WriteResult>;
|
||||
get backoffDuration(): number;
|
||||
markFlushed(): void;
|
||||
get flushed(): boolean;
|
||||
onError(error: GoogleError): void;
|
||||
private updateBackoffDuration;
|
||||
onSuccess(result: WriteResult): void;
|
||||
}
|
||||
/**
|
||||
* Used to represent a batch on the BatchQueue.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
declare class BulkCommitBatch extends WriteBatch {
|
||||
readonly docPaths: Set<string>;
|
||||
readonly pendingOps: Array<BulkWriterOperation>;
|
||||
private _maxBatchSize;
|
||||
constructor(firestore: Firestore, maxBatchSize: number);
|
||||
get maxBatchSize(): number;
|
||||
setMaxBatchSize(size: number): void;
|
||||
has(documentRef: firestore.DocumentReference<unknown>): boolean;
|
||||
bulkCommit(options?: {
|
||||
requestTag?: string;
|
||||
}): Promise<void>;
|
||||
/**
|
||||
* Helper to update data structures associated with the operation and returns
|
||||
* the result.
|
||||
*/
|
||||
processLastOperation(op: BulkWriterOperation): void;
|
||||
}
|
||||
/**
|
||||
* The error thrown when a BulkWriter operation fails.
|
||||
*
|
||||
* @class BulkWriterError
|
||||
*/
|
||||
export declare class BulkWriterError extends Error {
|
||||
/** The status code of the error. */
|
||||
readonly code: GrpcStatus;
|
||||
/** The error message of the error. */
|
||||
readonly message: string;
|
||||
/** The document reference the operation was performed on. */
|
||||
readonly documentRef: firestore.DocumentReference<any, any>;
|
||||
/** The type of operation performed. */
|
||||
readonly operationType: 'create' | 'set' | 'update' | 'delete';
|
||||
/** How many times this operation has been attempted unsuccessfully. */
|
||||
readonly failedAttempts: number;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(
|
||||
/** The status code of the error. */
|
||||
code: GrpcStatus,
|
||||
/** The error message of the error. */
|
||||
message: string,
|
||||
/** The document reference the operation was performed on. */
|
||||
documentRef: firestore.DocumentReference<any, any>,
|
||||
/** The type of operation performed. */
|
||||
operationType: 'create' | 'set' | 'update' | 'delete',
|
||||
/** How many times this operation has been attempted unsuccessfully. */
|
||||
failedAttempts: number);
|
||||
}
|
||||
/**
|
||||
* A Firestore BulkWriter that can be used to perform a large number of writes
|
||||
* in parallel.
|
||||
*
|
||||
* @class BulkWriter
|
||||
*/
|
||||
export declare class BulkWriter {
|
||||
private readonly firestore;
|
||||
/**
|
||||
* The maximum number of writes that can be in a single batch.
|
||||
* Visible for testing.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _maxBatchSize;
|
||||
/**
|
||||
* The batch that is currently used to schedule operations. Once this batch
|
||||
* reaches maximum capacity, a new batch is created.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _bulkCommitBatch;
|
||||
/**
|
||||
* A pointer to the tail of all active BulkWriter operations. This pointer
|
||||
* is advanced every time a new write is enqueued.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _lastOp;
|
||||
/**
|
||||
* When this BulkWriter instance has started to close, a flush promise is
|
||||
* saved. Afterwards, no new operations can be enqueued, except for retry
|
||||
* operations scheduled by the error handler.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _closePromise;
|
||||
/**
|
||||
* Rate limiter used to throttle requests as per the 500/50/5 rule.
|
||||
* Visible for testing.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
readonly _rateLimiter: RateLimiter;
|
||||
/**
|
||||
* The number of pending operations enqueued on this BulkWriter instance.
|
||||
* An operation is considered pending if BulkWriter has sent it via RPC and
|
||||
* is awaiting the result.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _pendingOpsCount;
|
||||
/**
|
||||
* An array containing buffered BulkWriter operations after the maximum number
|
||||
* of pending operations has been enqueued.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _bufferedOperations;
|
||||
/**
|
||||
* Whether a custom error handler has been set. BulkWriter only swallows
|
||||
* errors if an error handler is set. Otherwise, an UnhandledPromiseRejection
|
||||
* is thrown by Node if an operation promise is rejected without being
|
||||
* handled.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _errorHandlerSet;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_getBufferedOperationsCount(): number;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_setMaxBatchSize(size: number): void;
|
||||
/**
|
||||
* The maximum number of pending operations that can be enqueued onto this
|
||||
* BulkWriter instance. Once the this number of writes have been enqueued,
|
||||
* subsequent writes are buffered.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _maxPendingOpCount;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_setMaxPendingOpCount(newMax: number): void;
|
||||
/**
|
||||
* The user-provided callback to be run every time a BulkWriter operation
|
||||
* successfully completes.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _successFn;
|
||||
/**
|
||||
* The user-provided callback to be run every time a BulkWriter operation
|
||||
* fails.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _errorFn;
|
||||
/** @private */
|
||||
constructor(firestore: Firestore, options?: firestore.BulkWriterOptions);
|
||||
/**
|
||||
* Create a document with the provided data. This single operation will fail
|
||||
* if a document exists at its location.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* created.
|
||||
* @param {T} data The object to serialize as the document.
|
||||
* @throws {Error} If the provided input is not a valid Firestore document.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the write. If the write fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.collection('col').doc();
|
||||
*
|
||||
* bulkWriter
|
||||
* .create(documentRef, {foo: 'bar'})
|
||||
* .then(result => {
|
||||
* console.log('Successfully executed write at: ', result);
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Write failed with: ', err);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
create<AppModelType, DbModelType extends firestore.DocumentData>(documentRef: firestore.DocumentReference<AppModelType, DbModelType>, data: firestore.WithFieldValue<AppModelType>): Promise<WriteResult>;
|
||||
/**
|
||||
* Delete a document from the database.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* deleted.
|
||||
* @param {Precondition=} precondition A precondition to enforce for this
|
||||
* delete.
|
||||
* @param {Timestamp=} precondition.lastUpdateTime If set, enforces that the
|
||||
* document was last updated at lastUpdateTime. Fails the batch if the
|
||||
* document doesn't exist or was last updated at a different time.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the delete. If the delete fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* bulkWriter
|
||||
* .delete(documentRef)
|
||||
* .then(result => {
|
||||
* console.log('Successfully deleted document');
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Delete failed with: ', err);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
delete<AppModelType, DbModelType extends firestore.DocumentData>(documentRef: firestore.DocumentReference<AppModelType, DbModelType>, precondition?: firestore.Precondition): Promise<WriteResult>;
|
||||
set<AppModelType, DbModelType extends firestore.DocumentData>(documentRef: firestore.DocumentReference<AppModelType, DbModelType>, data: Partial<AppModelType>, options: firestore.SetOptions): Promise<WriteResult>;
|
||||
set<AppModelType, DbModelType extends firestore.DocumentData>(documentRef: firestore.DocumentReference<AppModelType, DbModelType>, data: AppModelType): Promise<WriteResult>;
|
||||
/**
|
||||
* Update fields of the document referred to by the provided
|
||||
* [DocumentReference]{@link DocumentReference}. If the document doesn't yet
|
||||
* exist, the update fails and the entire batch will be rejected.
|
||||
*
|
||||
* The update() method accepts either an object with field paths encoded as
|
||||
* keys and field values encoded as values, or a variable number of arguments
|
||||
* that alternate between field paths and field values. Nested fields can be
|
||||
* updated by providing dot-separated field path strings or by providing
|
||||
* FieldPath objects.
|
||||
*
|
||||
*
|
||||
* A Precondition restricting this update can be specified as the last
|
||||
* argument.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* updated.
|
||||
* @param {UpdateData|string|FieldPath} dataOrField An object containing the
|
||||
* fields and values with which to update the document or the path of the
|
||||
* first field to update.
|
||||
* @param {...(Precondition|*|string|FieldPath)} preconditionOrValues - An
|
||||
* alternating list of field paths and values to update or a Precondition to
|
||||
* restrict this update
|
||||
* @throws {Error} If the provided input is not valid Firestore data.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the write. If the write fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* bulkWriter
|
||||
* .update(documentRef, {foo: 'bar'})
|
||||
* .then(result => {
|
||||
* console.log('Successfully executed write at: ', result);
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Write failed with: ', err);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
update<AppModelType, DbModelType extends firestore.DocumentData>(documentRef: firestore.DocumentReference<AppModelType, DbModelType>, dataOrField: firestore.UpdateData<DbModelType> | string | FieldPath, ...preconditionOrValues: Array<{
|
||||
lastUpdateTime?: Timestamp;
|
||||
} | unknown | string | FieldPath>): Promise<WriteResult>;
|
||||
/**
|
||||
* Callback function set by {@link BulkWriter#onWriteResult} that is run
|
||||
* every time a {@link BulkWriter} operation successfully completes.
|
||||
*
|
||||
* @callback BulkWriter~successCallback
|
||||
* @param {DocumentReference} documentRef The document reference the
|
||||
* operation was performed on
|
||||
* @param {WriteResult} result The server write time of the operation.
|
||||
*/
|
||||
/**
|
||||
* Attaches a listener that is run every time a BulkWriter operation
|
||||
* successfully completes.
|
||||
*
|
||||
* @param {BulkWriter~successCallback} successCallback A callback to be
|
||||
* called every time a BulkWriter operation successfully completes.
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter
|
||||
* .onWriteResult((documentRef, result) => {
|
||||
* console.log(
|
||||
* 'Successfully executed write on document: ',
|
||||
* documentRef,
|
||||
* ' at: ',
|
||||
* result
|
||||
* );
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
onWriteResult(successCallback: (documentRef: firestore.DocumentReference<any, any>, result: WriteResult) => void): void;
|
||||
/**
|
||||
* Callback function set by {@link BulkWriter#onWriteError} that is run when
|
||||
* a write fails in order to determine whether {@link BulkWriter} should
|
||||
* retry the operation.
|
||||
*
|
||||
* @callback BulkWriter~shouldRetryCallback
|
||||
* @param {BulkWriterError} error The error object with information about the
|
||||
* operation and error.
|
||||
* @returns {boolean} Whether or not to retry the failed operation. Returning
|
||||
* `true` retries the operation. Returning `false` will stop the retry loop.
|
||||
*/
|
||||
/**
|
||||
* Attaches an error handler listener that is run every time a BulkWriter
|
||||
* operation fails.
|
||||
*
|
||||
* BulkWriter has a default error handler that retries UNAVAILABLE and
|
||||
* ABORTED errors up to a maximum of 10 failed attempts. When an error
|
||||
* handler is specified, the default error handler will be overwritten.
|
||||
*
|
||||
* @param shouldRetryCallback {BulkWriter~shouldRetryCallback} A callback to
|
||||
* be called every time a BulkWriter operation fails. Returning `true` will
|
||||
* retry the operation. Returning `false` will stop the retry loop.
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter
|
||||
* .onWriteError((error) => {
|
||||
* if (
|
||||
* error.code === GrpcStatus.UNAVAILABLE &&
|
||||
* error.failedAttempts < MAX_RETRY_ATTEMPTS
|
||||
* ) {
|
||||
* return true;
|
||||
* } else {
|
||||
* console.log('Failed write at document: ', error.documentRef);
|
||||
* return false;
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
onWriteError(shouldRetryCallback: (error: BulkWriterError) => boolean): void;
|
||||
/**
|
||||
* Commits all writes that have been enqueued up to this point in parallel.
|
||||
*
|
||||
* Returns a Promise that resolves when all currently queued operations have
|
||||
* been committed. The Promise will never be rejected since the results for
|
||||
* each individual operation are conveyed via their individual Promises.
|
||||
*
|
||||
* The Promise resolves immediately if there are no pending writes. Otherwise,
|
||||
* the Promise waits for all previously issued writes, but it does not wait
|
||||
* for writes that were added after the method is called. If you want to wait
|
||||
* for additional writes, call `flush()` again.
|
||||
*
|
||||
* @return {Promise<void>} A promise that resolves when all enqueued writes
|
||||
* up to this point have been committed.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter.create(documentRef, {foo: 'bar'});
|
||||
* bulkWriter.update(documentRef2, {foo: 'bar'});
|
||||
* bulkWriter.delete(documentRef3);
|
||||
* await flush().then(() => {
|
||||
* console.log('Executed all writes');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
flush(): Promise<void>;
|
||||
/**
|
||||
* Commits all enqueued writes and marks the BulkWriter instance as closed.
|
||||
*
|
||||
* After calling `close()`, calling any method will throw an error. Any
|
||||
* retries scheduled as part of an `onWriteError()` handler will be run
|
||||
* before the `close()` promise resolves.
|
||||
*
|
||||
* Returns a Promise that resolves when there are no more pending writes. The
|
||||
* Promise will never be rejected. Calling this method will send all requests.
|
||||
* The promise resolves immediately if there are no pending writes.
|
||||
*
|
||||
* @return {Promise<void>} A promise that resolves when all enqueued writes
|
||||
* up to this point have been committed.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter.create(documentRef, {foo: 'bar'});
|
||||
* bulkWriter.update(documentRef2, {foo: 'bar'});
|
||||
* bulkWriter.delete(documentRef3);
|
||||
* await close().then(() => {
|
||||
* console.log('Executed all writes');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
close(): Promise<void>;
|
||||
/**
|
||||
* Throws an error if the BulkWriter instance has been closed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_verifyNotClosed(): void;
|
||||
/**
|
||||
* Sends the current batch and resets `this._bulkCommitBatch`.
|
||||
*
|
||||
* @param flush If provided, keeps re-sending operations until no more
|
||||
* operations are enqueued. This allows retries to resolve as part of a
|
||||
* `flush()` or `close()` call.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _scheduleCurrentBatch;
|
||||
/**
|
||||
* Sends the provided batch once the rate limiter does not require any delay.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _sendBatch;
|
||||
/**
|
||||
* Adds a 30% jitter to the provided backoff.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private static _applyJitter;
|
||||
/**
|
||||
* Schedules and runs the provided operation on the next available batch.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _enqueue;
|
||||
/**
|
||||
* Manages the pending operation counter and schedules the next BulkWriter
|
||||
* operation if we're under the maximum limit.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _processBufferedOps;
|
||||
/**
|
||||
* Schedules the provided operations on current BulkCommitBatch.
|
||||
* Sends the BulkCommitBatch if it reaches maximum capacity.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_sendFn(enqueueOnBatchCallback: (bulkCommitBatch: BulkCommitBatch) => void, op: BulkWriterOperation): void;
|
||||
}
|
||||
export {};
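The declaration file above documents the public surface of `BulkWriter`. As a quick orientation, here is a minimal, hedged usage sketch (not part of the vendored sources): the `demo` collection, the retry cap, and the use of raw numeric gRPC status codes in place of the `GrpcStatus` enum shown in the JSDoc examples are all illustrative assumptions.

```
// Sketch only: assumes default application credentials and a hypothetical
// `demo` collection; the numeric codes follow the standard gRPC status values.
import {Firestore} from '@google-cloud/firestore';

const UNAVAILABLE = 14;        // gRPC UNAVAILABLE
const RESOURCE_EXHAUSTED = 8;  // gRPC RESOURCE_EXHAUSTED
const MAX_ATTEMPTS = 5;        // illustrative cap, not a library constant

async function writeWithCustomRetry(): Promise<void> {
  const firestore = new Firestore();
  const writer = firestore.bulkWriter();

  // Replace the default retry policy described in the JSDoc above.
  writer.onWriteError(error => {
    const retryable =
      error.code === UNAVAILABLE || error.code === RESOURCE_EXHAUSTED;
    if (retryable && error.failedAttempts < MAX_ATTEMPTS) {
      return true; // re-enqueue the failed write
    }
    console.error(
      `Giving up on ${error.operationType} of ${error.documentRef.path}: ${error.message}`
    );
    return false; // reject this operation's promise with the BulkWriterError
  });

  writer.onWriteResult((documentRef, result) => {
    console.log(`Wrote ${documentRef.path} at ${result.writeTime.toDate().toISOString()}`);
  });

  for (let i = 0; i < 100; i++) {
    writer
      .set(firestore.doc(`demo/doc-${i}`), {index: i})
      .catch(() => {
        // Final failure was already reported by the onWriteError handler.
      });
  }

  await writer.close(); // flushes all queued writes, then closes the writer
}

writeWithCustomRetry().catch(console.error);
```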
|
||||
915
server/node_modules/@google-cloud/firestore/build/src/bulk-writer.js
generated
vendored
Normal file
@@ -0,0 +1,915 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.BulkWriter = exports.BulkWriterError = exports.DEFAULT_JITTER_FACTOR = exports.DEFAULT_MAXIMUM_OPS_PER_SECOND_LIMIT = exports.DEFAULT_INITIAL_OPS_PER_SECOND_LIMIT = exports.RETRY_MAX_BATCH_SIZE = void 0;
|
||||
const assert = require("assert");
|
||||
const backoff_1 = require("./backoff");
|
||||
const rate_limiter_1 = require("./rate-limiter");
|
||||
const timestamp_1 = require("./timestamp");
|
||||
const util_1 = require("./util");
|
||||
const write_batch_1 = require("./write-batch");
|
||||
const validate_1 = require("./validate");
|
||||
const logger_1 = require("./logger");
|
||||
const trace_util_1 = require("./telemetry/trace-util");
|
||||
/*!
|
||||
* The maximum number of writes that can be in a single batch.
|
||||
*/
|
||||
const MAX_BATCH_SIZE = 20;
|
||||
/*!
|
||||
 * The maximum number of writes that can be in a single batch that is being retried.
|
||||
*/
|
||||
exports.RETRY_MAX_BATCH_SIZE = 10;
|
||||
/*!
|
||||
* The starting maximum number of operations per second as allowed by the
|
||||
* 500/50/5 rule.
|
||||
*
|
||||
* https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic.
|
||||
*/
|
||||
exports.DEFAULT_INITIAL_OPS_PER_SECOND_LIMIT = 500;
|
||||
/*!
|
||||
* The maximum number of operations per second as allowed by the 500/50/5 rule.
|
||||
* By default the rate limiter will not exceed this value.
|
||||
*
|
||||
* https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic.
|
||||
*/
|
||||
exports.DEFAULT_MAXIMUM_OPS_PER_SECOND_LIMIT = 10000;
|
||||
/*!
|
||||
* The default jitter to apply to the exponential backoff used in retries. For
|
||||
* example, a factor of 0.3 means a 30% jitter is applied.
|
||||
*/
|
||||
exports.DEFAULT_JITTER_FACTOR = 0.3;
|
||||
/*!
|
||||
* The rate by which to increase the capacity as specified by the 500/50/5 rule.
|
||||
*/
|
||||
const RATE_LIMITER_MULTIPLIER = 1.5;
|
||||
/*!
|
||||
* How often the operations per second capacity should increase in milliseconds
|
||||
* as specified by the 500/50/5 rule.
|
||||
*/
|
||||
const RATE_LIMITER_MULTIPLIER_MILLIS = 5 * 60 * 1000;
|
||||
/*!
|
||||
* The default maximum number of pending operations that can be enqueued onto a
|
||||
* BulkWriter instance. An operation is considered pending if BulkWriter has
|
||||
* sent it via RPC and is awaiting the result. BulkWriter buffers additional
|
||||
 * writes after this many pending operations in order to avoid going OOM.
|
||||
*/
|
||||
const DEFAULT_MAXIMUM_PENDING_OPERATIONS_COUNT = 500;
|
||||
/**
|
||||
* Represents a single write for BulkWriter, encapsulating operation dispatch
|
||||
* and error handling.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class BulkWriterOperation {
|
||||
/**
|
||||
* @param ref The document reference being written to.
|
||||
* @param type The type of operation that created this write.
|
||||
* @param sendFn A callback to invoke when the operation should be sent.
|
||||
* @param errorFn The user provided global error callback.
|
||||
* @param successFn The user provided global success callback.
|
||||
*/
|
||||
constructor(ref, type, sendFn, errorFn, successFn) {
|
||||
this.ref = ref;
|
||||
this.type = type;
|
||||
this.sendFn = sendFn;
|
||||
this.errorFn = errorFn;
|
||||
this.successFn = successFn;
|
||||
this.deferred = new util_1.Deferred();
|
||||
this.failedAttempts = 0;
|
||||
this._backoffDuration = 0;
|
||||
/** Whether flush() was called when this was the last enqueued operation. */
|
||||
this._flushed = false;
|
||||
}
|
||||
get promise() {
|
||||
return this.deferred.promise;
|
||||
}
|
||||
get backoffDuration() {
|
||||
return this._backoffDuration;
|
||||
}
|
||||
markFlushed() {
|
||||
this._flushed = true;
|
||||
}
|
||||
get flushed() {
|
||||
return this._flushed;
|
||||
}
|
||||
onError(error) {
|
||||
++this.failedAttempts;
|
||||
try {
|
||||
const bulkWriterError = new BulkWriterError(error.code, error.message, this.ref, this.type, this.failedAttempts);
|
||||
const shouldRetry = this.errorFn(bulkWriterError);
|
||||
(0, logger_1.logger)('BulkWriter.errorFn', null, 'Ran error callback on error code:', error.code, ', shouldRetry:', shouldRetry, ' for document:', this.ref.path);
|
||||
if (shouldRetry) {
|
||||
this.lastStatus = error.code;
|
||||
this.updateBackoffDuration();
|
||||
this.sendFn(this);
|
||||
}
|
||||
else {
|
||||
this.deferred.reject(bulkWriterError);
|
||||
}
|
||||
}
|
||||
catch (userCallbackError) {
|
||||
this.deferred.reject(userCallbackError);
|
||||
}
|
||||
}
|
||||
updateBackoffDuration() {
|
||||
if (this.lastStatus === 8 /* StatusCode.RESOURCE_EXHAUSTED */) {
|
||||
this._backoffDuration = backoff_1.DEFAULT_BACKOFF_MAX_DELAY_MS;
|
||||
}
|
||||
else if (this._backoffDuration === 0) {
|
||||
this._backoffDuration = backoff_1.DEFAULT_BACKOFF_INITIAL_DELAY_MS;
|
||||
}
|
||||
else {
|
||||
this._backoffDuration *= backoff_1.DEFAULT_BACKOFF_FACTOR;
|
||||
}
|
||||
}
|
||||
onSuccess(result) {
|
||||
try {
|
||||
this.successFn(this.ref, result);
|
||||
this.deferred.resolve(result);
|
||||
}
|
||||
catch (userCallbackError) {
|
||||
this.deferred.reject(userCallbackError);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Used to represent a batch on the BatchQueue.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class BulkCommitBatch extends write_batch_1.WriteBatch {
|
||||
constructor(firestore, maxBatchSize) {
|
||||
super(firestore);
|
||||
// The set of document reference paths present in the WriteBatch.
|
||||
this.docPaths = new Set();
|
||||
// An array of pending write operations. Only contains writes that have not
|
||||
// been resolved.
|
||||
this.pendingOps = [];
|
||||
this._maxBatchSize = maxBatchSize;
|
||||
}
|
||||
get maxBatchSize() {
|
||||
return this._maxBatchSize;
|
||||
}
|
||||
setMaxBatchSize(size) {
|
||||
assert(this.pendingOps.length <= size, 'New batch size cannot be less than the number of enqueued writes');
|
||||
this._maxBatchSize = size;
|
||||
}
|
||||
has(documentRef) {
|
||||
return this.docPaths.has(documentRef.path);
|
||||
}
|
||||
async bulkCommit(options = {}) {
|
||||
return this._firestore._traceUtil.startActiveSpan(trace_util_1.SPAN_NAME_BULK_WRITER_COMMIT, async () => {
|
||||
var _a;
|
||||
const tag = (_a = options === null || options === void 0 ? void 0 : options.requestTag) !== null && _a !== void 0 ? _a : (0, util_1.requestTag)();
|
||||
// Capture the error stack to preserve stack tracing across async calls.
|
||||
const stack = Error().stack;
|
||||
let response;
|
||||
try {
|
||||
(0, logger_1.logger)('BulkCommitBatch.bulkCommit', tag, `Sending next batch with ${this._opCount} writes`);
|
||||
const retryCodes = (0, util_1.getRetryCodes)('batchWrite');
|
||||
response = await this._commit({ retryCodes, methodName: 'batchWrite', requestTag: tag });
|
||||
}
|
||||
catch (err) {
|
||||
// Map the failure to each individual write's result.
|
||||
const ops = Array.from({ length: this.pendingOps.length });
|
||||
response = {
|
||||
writeResults: ops.map(() => {
|
||||
return {};
|
||||
}),
|
||||
status: ops.map(() => err),
|
||||
};
|
||||
}
|
||||
for (let i = 0; i < (response.writeResults || []).length; ++i) {
|
||||
// Since delete operations currently do not have write times, use a
|
||||
// sentinel Timestamp value.
|
||||
// TODO(b/158502664): Use actual delete timestamp.
|
||||
const DELETE_TIMESTAMP_SENTINEL = timestamp_1.Timestamp.fromMillis(0);
|
||||
const status = (response.status || [])[i];
|
||||
if (status.code === 0 /* StatusCode.OK */) {
|
||||
const updateTime = timestamp_1.Timestamp.fromProto(response.writeResults[i].updateTime || DELETE_TIMESTAMP_SENTINEL);
|
||||
this.pendingOps[i].onSuccess(new write_batch_1.WriteResult(updateTime));
|
||||
}
|
||||
else {
|
||||
const error = new (require('google-gax/build/src/fallback').GoogleError)(status.message || undefined);
|
||||
error.code = status.code;
|
||||
this.pendingOps[i].onError((0, util_1.wrapError)(error, stack));
|
||||
}
|
||||
}
|
||||
}, {
|
||||
[trace_util_1.ATTRIBUTE_KEY_DOC_COUNT]: this._opCount,
|
||||
});
|
||||
}
|
||||
/**
|
||||
     * Helper that updates data structures associated with the operation.
|
||||
*/
|
||||
processLastOperation(op) {
|
||||
assert(!this.docPaths.has(op.ref.path), 'Batch should not contain writes to the same document');
|
||||
this.docPaths.add(op.ref.path);
|
||||
this.pendingOps.push(op);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Used to represent a buffered BulkWriterOperation.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class BufferedOperation {
|
||||
constructor(operation, sendFn) {
|
||||
this.operation = operation;
|
||||
this.sendFn = sendFn;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* The error thrown when a BulkWriter operation fails.
|
||||
*
|
||||
* @class BulkWriterError
|
||||
*/
|
||||
class BulkWriterError extends Error {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(
|
||||
/** The status code of the error. */
|
||||
code,
|
||||
/** The error message of the error. */
|
||||
message,
|
||||
/** The document reference the operation was performed on. */
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
documentRef,
|
||||
/** The type of operation performed. */
|
||||
operationType,
|
||||
/** How many times this operation has been attempted unsuccessfully. */
|
||||
failedAttempts) {
|
||||
super(message);
|
||||
this.code = code;
|
||||
this.message = message;
|
||||
this.documentRef = documentRef;
|
||||
this.operationType = operationType;
|
||||
this.failedAttempts = failedAttempts;
|
||||
}
|
||||
}
|
||||
exports.BulkWriterError = BulkWriterError;
|
||||
/**
|
||||
* A Firestore BulkWriter that can be used to perform a large number of writes
|
||||
* in parallel.
|
||||
*
|
||||
* @class BulkWriter
|
||||
*/
|
||||
class BulkWriter {
|
||||
// Visible for testing.
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_getBufferedOperationsCount() {
|
||||
return this._bufferedOperations.length;
|
||||
}
|
||||
// Visible for testing.
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_setMaxBatchSize(size) {
|
||||
assert(this._bulkCommitBatch.pendingOps.length === 0, 'BulkCommitBatch should be empty');
|
||||
this._maxBatchSize = size;
|
||||
this._bulkCommitBatch = new BulkCommitBatch(this.firestore, size);
|
||||
}
|
||||
// Visible for testing.
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_setMaxPendingOpCount(newMax) {
|
||||
this._maxPendingOpCount = newMax;
|
||||
}
|
||||
/** @private */
|
||||
constructor(firestore, options) {
|
||||
var _a, _b;
|
||||
this.firestore = firestore;
|
||||
/**
|
||||
* The maximum number of writes that can be in a single batch.
|
||||
* Visible for testing.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._maxBatchSize = MAX_BATCH_SIZE;
|
||||
/**
|
||||
* The batch that is currently used to schedule operations. Once this batch
|
||||
* reaches maximum capacity, a new batch is created.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._bulkCommitBatch = new BulkCommitBatch(this.firestore, this._maxBatchSize);
|
||||
/**
|
||||
* A pointer to the tail of all active BulkWriter operations. This pointer
|
||||
* is advanced every time a new write is enqueued.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._lastOp = Promise.resolve();
|
||||
/**
|
||||
* The number of pending operations enqueued on this BulkWriter instance.
|
||||
* An operation is considered pending if BulkWriter has sent it via RPC and
|
||||
* is awaiting the result.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._pendingOpsCount = 0;
|
||||
/**
|
||||
* An array containing buffered BulkWriter operations after the maximum number
|
||||
* of pending operations has been enqueued.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._bufferedOperations = [];
|
||||
/**
|
||||
* Whether a custom error handler has been set. BulkWriter only swallows
|
||||
* errors if an error handler is set. Otherwise, an UnhandledPromiseRejection
|
||||
* is thrown by Node if an operation promise is rejected without being
|
||||
* handled.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._errorHandlerSet = false;
|
||||
/**
|
||||
* The maximum number of pending operations that can be enqueued onto this
|
||||
         * BulkWriter instance. Once this number of writes has been enqueued,
|
||||
* subsequent writes are buffered.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._maxPendingOpCount = DEFAULT_MAXIMUM_PENDING_OPERATIONS_COUNT;
|
||||
/**
|
||||
* The user-provided callback to be run every time a BulkWriter operation
|
||||
* successfully completes.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._successFn = () => { };
|
||||
/**
|
||||
* The user-provided callback to be run every time a BulkWriter operation
|
||||
* fails.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this._errorFn = error => {
|
||||
const isRetryableDeleteError = error.operationType === 'delete' &&
|
||||
error.code === 13 /* StatusCode.INTERNAL */;
|
||||
const retryCodes = (0, util_1.getRetryCodes)('batchWrite');
|
||||
return ((retryCodes.includes(error.code) || isRetryableDeleteError) &&
|
||||
error.failedAttempts < backoff_1.MAX_RETRY_ATTEMPTS);
|
||||
};
|
||||
this.firestore._incrementBulkWritersCount();
|
||||
validateBulkWriterOptions(options);
|
||||
if ((options === null || options === void 0 ? void 0 : options.throttling) === false) {
|
||||
this._rateLimiter = new rate_limiter_1.RateLimiter(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY);
|
||||
}
|
||||
else {
|
||||
let startingRate = exports.DEFAULT_INITIAL_OPS_PER_SECOND_LIMIT;
|
||||
let maxRate = exports.DEFAULT_MAXIMUM_OPS_PER_SECOND_LIMIT;
|
||||
if (typeof (options === null || options === void 0 ? void 0 : options.throttling) !== 'boolean') {
|
||||
if (((_a = options === null || options === void 0 ? void 0 : options.throttling) === null || _a === void 0 ? void 0 : _a.maxOpsPerSecond) !== undefined) {
|
||||
maxRate = options.throttling.maxOpsPerSecond;
|
||||
}
|
||||
if (((_b = options === null || options === void 0 ? void 0 : options.throttling) === null || _b === void 0 ? void 0 : _b.initialOpsPerSecond) !== undefined) {
|
||||
startingRate = options.throttling.initialOpsPerSecond;
|
||||
}
|
||||
// The initial validation step ensures that the maxOpsPerSecond is
|
||||
// greater than initialOpsPerSecond. If this inequality is true, that
|
||||
// means initialOpsPerSecond was not set and maxOpsPerSecond is less
|
||||
// than the default starting rate.
|
||||
if (maxRate < startingRate) {
|
||||
startingRate = maxRate;
|
||||
}
|
||||
// Ensure that the batch size is not larger than the number of allowed
|
||||
// operations per second.
|
||||
if (startingRate < this._maxBatchSize) {
|
||||
this._maxBatchSize = startingRate;
|
||||
}
|
||||
}
|
||||
this._rateLimiter = new rate_limiter_1.RateLimiter(startingRate, RATE_LIMITER_MULTIPLIER, RATE_LIMITER_MULTIPLIER_MILLIS, maxRate);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Create a document with the provided data. This single operation will fail
|
||||
* if a document exists at its location.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* created.
|
||||
* @param {T} data The object to serialize as the document.
|
||||
* @throws {Error} If the provided input is not a valid Firestore document.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the write. If the write fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.collection('col').doc();
|
||||
*
|
||||
* bulkWriter
|
||||
* .create(documentRef, {foo: 'bar'})
|
||||
* .then(result => {
|
||||
* console.log('Successfully executed write at: ', result);
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Write failed with: ', err);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
create(documentRef, data) {
|
||||
this._verifyNotClosed();
|
||||
return this._enqueue(documentRef, 'create', bulkCommitBatch => bulkCommitBatch.create(documentRef, data));
|
||||
}
|
||||
/**
|
||||
* Delete a document from the database.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* deleted.
|
||||
* @param {Precondition=} precondition A precondition to enforce for this
|
||||
* delete.
|
||||
* @param {Timestamp=} precondition.lastUpdateTime If set, enforces that the
|
||||
* document was last updated at lastUpdateTime. Fails the batch if the
|
||||
* document doesn't exist or was last updated at a different time.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the delete. If the delete fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* bulkWriter
|
||||
* .delete(documentRef)
|
||||
* .then(result => {
|
||||
* console.log('Successfully deleted document');
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Delete failed with: ', err);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
delete(documentRef, precondition) {
|
||||
this._verifyNotClosed();
|
||||
return this._enqueue(documentRef, 'delete', bulkCommitBatch => bulkCommitBatch.delete(documentRef, precondition));
|
||||
}
|
||||
/**
|
||||
* Write to the document referred to by the provided
|
||||
* [DocumentReference]{@link DocumentReference}. If the document does not
|
||||
     * exist yet, it will be created. If you pass [SetOptions]{@link SetOptions},
|
||||
* the provided data can be merged into the existing document.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* set.
|
||||
* @param {T} data The object to serialize as the document.
|
||||
* @param {SetOptions=} options An object to configure the set behavior.
|
||||
* @throws {Error} If the provided input is not a valid Firestore document.
|
||||
* @param {boolean=} options.merge - If true, set() merges the values
|
||||
* specified in its data argument. Fields omitted from this set() call remain
|
||||
* untouched. If your input sets any field to an empty map, all nested fields
|
||||
* are overwritten.
|
||||
* @param {Array.<string|FieldPath>=} options.mergeFields - If provided, set()
|
||||
* only replaces the specified field paths. Any field path that is not
|
||||
* specified is ignored and remains untouched. If your input sets any field to
|
||||
* an empty map, all nested fields are overwritten.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the write. If the write fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.collection('col').doc();
|
||||
*
|
||||
* bulkWriter
|
||||
* .set(documentRef, {foo: 'bar'})
|
||||
* .then(result => {
|
||||
* console.log('Successfully executed write at: ', result);
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Write failed with: ', err);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
set(documentRef, data, options) {
|
||||
this._verifyNotClosed();
|
||||
return this._enqueue(documentRef, 'set', bulkCommitBatch => {
|
||||
if (options) {
|
||||
return bulkCommitBatch.set(documentRef, data, options);
|
||||
}
|
||||
else {
|
||||
return bulkCommitBatch.set(documentRef, data);
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Update fields of the document referred to by the provided
|
||||
* [DocumentReference]{@link DocumentReference}. If the document doesn't yet
|
||||
* exist, the update fails and the entire batch will be rejected.
|
||||
*
|
||||
* The update() method accepts either an object with field paths encoded as
|
||||
* keys and field values encoded as values, or a variable number of arguments
|
||||
* that alternate between field paths and field values. Nested fields can be
|
||||
* updated by providing dot-separated field path strings or by providing
|
||||
* FieldPath objects.
|
||||
*
|
||||
*
|
||||
* A Precondition restricting this update can be specified as the last
|
||||
* argument.
|
||||
*
|
||||
* @param {DocumentReference} documentRef A reference to the document to be
|
||||
* updated.
|
||||
* @param {UpdateData|string|FieldPath} dataOrField An object containing the
|
||||
* fields and values with which to update the document or the path of the
|
||||
* first field to update.
|
||||
* @param {...(Precondition|*|string|FieldPath)} preconditionOrValues - An
|
||||
* alternating list of field paths and values to update or a Precondition to
|
||||
* restrict this update
|
||||
* @throws {Error} If the provided input is not valid Firestore data.
|
||||
* @returns {Promise<WriteResult>} A promise that resolves with the result of
|
||||
* the write. If the write fails, the promise is rejected with a
|
||||
* [BulkWriterError]{@link BulkWriterError}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* bulkWriter
|
||||
* .update(documentRef, {foo: 'bar'})
|
||||
* .then(result => {
|
||||
* console.log('Successfully executed write at: ', result);
|
||||
* })
|
||||
* .catch(err => {
|
||||
* console.log('Write failed with: ', err);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
update(documentRef, dataOrField, ...preconditionOrValues) {
|
||||
this._verifyNotClosed();
|
||||
return this._enqueue(documentRef, 'update', bulkCommitBatch => bulkCommitBatch.update(documentRef, dataOrField, ...preconditionOrValues));
|
||||
}
|
||||
/**
|
||||
* Callback function set by {@link BulkWriter#onWriteResult} that is run
|
||||
* every time a {@link BulkWriter} operation successfully completes.
|
||||
*
|
||||
* @callback BulkWriter~successCallback
|
||||
* @param {DocumentReference} documentRef The document reference the
|
||||
* operation was performed on
|
||||
* @param {WriteResult} result The server write time of the operation.
|
||||
*/
|
||||
/**
|
||||
* Attaches a listener that is run every time a BulkWriter operation
|
||||
* successfully completes.
|
||||
*
|
||||
* @param {BulkWriter~successCallback} successCallback A callback to be
|
||||
* called every time a BulkWriter operation successfully completes.
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter
|
||||
* .onWriteResult((documentRef, result) => {
|
||||
* console.log(
|
||||
* 'Successfully executed write on document: ',
|
||||
* documentRef,
|
||||
* ' at: ',
|
||||
* result
|
||||
* );
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
onWriteResult(successCallback) {
|
||||
this._successFn = successCallback;
|
||||
}
|
||||
/**
|
||||
* Callback function set by {@link BulkWriter#onWriteError} that is run when
|
||||
* a write fails in order to determine whether {@link BulkWriter} should
|
||||
* retry the operation.
|
||||
*
|
||||
* @callback BulkWriter~shouldRetryCallback
|
||||
* @param {BulkWriterError} error The error object with information about the
|
||||
* operation and error.
|
||||
* @returns {boolean} Whether or not to retry the failed operation. Returning
|
||||
* `true` retries the operation. Returning `false` will stop the retry loop.
|
||||
*/
|
||||
/**
|
||||
* Attaches an error handler listener that is run every time a BulkWriter
|
||||
* operation fails.
|
||||
*
|
||||
* BulkWriter has a default error handler that retries UNAVAILABLE and
|
||||
* ABORTED errors up to a maximum of 10 failed attempts. When an error
|
||||
* handler is specified, the default error handler will be overwritten.
|
||||
*
|
||||
* @param shouldRetryCallback {BulkWriter~shouldRetryCallback} A callback to
|
||||
* be called every time a BulkWriter operation fails. Returning `true` will
|
||||
* retry the operation. Returning `false` will stop the retry loop.
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter
|
||||
* .onWriteError((error) => {
|
||||
* if (
|
||||
* error.code === GrpcStatus.UNAVAILABLE &&
|
||||
* error.failedAttempts < MAX_RETRY_ATTEMPTS
|
||||
* ) {
|
||||
* return true;
|
||||
* } else {
|
||||
* console.log('Failed write at document: ', error.documentRef);
|
||||
* return false;
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
onWriteError(shouldRetryCallback) {
|
||||
this._errorHandlerSet = true;
|
||||
this._errorFn = shouldRetryCallback;
|
||||
}
|
||||
/**
|
||||
* Commits all writes that have been enqueued up to this point in parallel.
|
||||
*
|
||||
* Returns a Promise that resolves when all currently queued operations have
|
||||
* been committed. The Promise will never be rejected since the results for
|
||||
* each individual operation are conveyed via their individual Promises.
|
||||
*
|
||||
* The Promise resolves immediately if there are no pending writes. Otherwise,
|
||||
* the Promise waits for all previously issued writes, but it does not wait
|
||||
* for writes that were added after the method is called. If you want to wait
|
||||
* for additional writes, call `flush()` again.
|
||||
*
|
||||
* @return {Promise<void>} A promise that resolves when all enqueued writes
|
||||
* up to this point have been committed.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter.create(documentRef, {foo: 'bar'});
|
||||
* bulkWriter.update(documentRef2, {foo: 'bar'});
|
||||
* bulkWriter.delete(documentRef3);
|
||||
* await flush().then(() => {
|
||||
* console.log('Executed all writes');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
flush() {
|
||||
this._verifyNotClosed();
|
||||
this._scheduleCurrentBatch(/* flush= */ true);
|
||||
// Mark the most recent operation as flushed to ensure that the batch
|
||||
// containing it will be sent once it's popped from the buffer.
|
||||
if (this._bufferedOperations.length > 0) {
|
||||
this._bufferedOperations[this._bufferedOperations.length - 1].operation.markFlushed();
|
||||
}
|
||||
return this._lastOp;
|
||||
}
|
||||
/**
|
||||
* Commits all enqueued writes and marks the BulkWriter instance as closed.
|
||||
*
|
||||
* After calling `close()`, calling any method will throw an error. Any
|
||||
* retries scheduled as part of an `onWriteError()` handler will be run
|
||||
* before the `close()` promise resolves.
|
||||
*
|
||||
* Returns a Promise that resolves when there are no more pending writes. The
|
||||
* Promise will never be rejected. Calling this method will send all requests.
|
||||
* The promise resolves immediately if there are no pending writes.
|
||||
*
|
||||
* @return {Promise<void>} A promise that resolves when all enqueued writes
|
||||
* up to this point have been committed.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter.create(documentRef, {foo: 'bar'});
|
||||
* bulkWriter.update(documentRef2, {foo: 'bar'});
|
||||
* bulkWriter.delete(documentRef3);
|
||||
* await close().then(() => {
|
||||
* console.log('Executed all writes');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
close() {
|
||||
if (!this._closePromise) {
|
||||
this._closePromise = this.flush();
|
||||
this.firestore._decrementBulkWritersCount();
|
||||
}
|
||||
return this._closePromise;
|
||||
}
|
||||
/**
|
||||
* Throws an error if the BulkWriter instance has been closed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_verifyNotClosed() {
|
||||
if (this._closePromise) {
|
||||
throw new Error('BulkWriter has already been closed.');
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Sends the current batch and resets `this._bulkCommitBatch`.
|
||||
*
|
||||
* @param flush If provided, keeps re-sending operations until no more
|
||||
* operations are enqueued. This allows retries to resolve as part of a
|
||||
* `flush()` or `close()` call.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_scheduleCurrentBatch(flush = false) {
|
||||
if (this._bulkCommitBatch._opCount === 0)
|
||||
return;
|
||||
const pendingBatch = this._bulkCommitBatch;
|
||||
this._bulkCommitBatch = new BulkCommitBatch(this.firestore, this._maxBatchSize);
|
||||
// Use the write with the longest backoff duration when determining backoff.
|
||||
const highestBackoffDuration = pendingBatch.pendingOps.reduce((prev, cur) => (prev.backoffDuration > cur.backoffDuration ? prev : cur)).backoffDuration;
|
||||
const backoffMsWithJitter = BulkWriter._applyJitter(highestBackoffDuration);
|
||||
const delayedExecution = new util_1.Deferred();
|
||||
if (backoffMsWithJitter > 0) {
|
||||
(0, backoff_1.delayExecution)(() => delayedExecution.resolve(), backoffMsWithJitter);
|
||||
}
|
||||
else {
|
||||
delayedExecution.resolve();
|
||||
}
|
||||
delayedExecution.promise.then(() => this._sendBatch(pendingBatch, flush));
|
||||
}
|
||||
/**
|
||||
* Sends the provided batch once the rate limiter does not require any delay.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
async _sendBatch(batch, flush = false) {
|
||||
const tag = (0, util_1.requestTag)();
|
||||
        // Send the batch if it does not require any delay, or schedule another
|
||||
// attempt after the appropriate timeout.
|
||||
const underRateLimit = this._rateLimiter.tryMakeRequest(batch._opCount);
|
||||
if (underRateLimit) {
|
||||
await batch.bulkCommit({ requestTag: tag });
|
||||
if (flush)
|
||||
this._scheduleCurrentBatch(flush);
|
||||
}
|
||||
else {
|
||||
const delayMs = this._rateLimiter.getNextRequestDelayMs(batch._opCount);
|
||||
            (0, logger_1.logger)('BulkWriter._sendBatch', tag, `Backing off for ${delayMs} milliseconds`);
|
||||
(0, backoff_1.delayExecution)(() => this._sendBatch(batch, flush), delayMs);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Adds a 30% jitter to the provided backoff.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _applyJitter(backoffMs) {
|
||||
if (backoffMs === 0)
|
||||
return 0;
|
||||
// Random value in [-0.3, 0.3].
|
||||
const jitter = exports.DEFAULT_JITTER_FACTOR * (Math.random() * 2 - 1);
|
||||
return Math.min(backoff_1.DEFAULT_BACKOFF_MAX_DELAY_MS, backoffMs + jitter * backoffMs);
|
||||
}
|
||||
/**
|
||||
* Schedules and runs the provided operation on the next available batch.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_enqueue(ref, type, enqueueOnBatchCallback) {
|
||||
const bulkWriterOp = new BulkWriterOperation(ref, type, this._sendFn.bind(this, enqueueOnBatchCallback), this._errorFn.bind(this), this._successFn.bind(this));
|
||||
// Swallow the error if the developer has set an error listener. This
|
||||
// prevents UnhandledPromiseRejections from being thrown if a floating
|
||||
// BulkWriter operation promise fails when an error handler is specified.
|
||||
//
|
||||
// This is done here in order to chain the caught promise onto `lastOp`,
|
||||
// which ensures that flush() resolves after the operation promise.
|
||||
const userPromise = bulkWriterOp.promise.catch(err => {
|
||||
if (!this._errorHandlerSet) {
|
||||
throw err;
|
||||
}
|
||||
else {
|
||||
return bulkWriterOp.promise;
|
||||
}
|
||||
});
|
||||
// Advance the `_lastOp` pointer. This ensures that `_lastOp` only resolves
|
||||
// when both the previous and the current write resolve.
|
||||
this._lastOp = this._lastOp.then(() => (0, util_1.silencePromise)(userPromise));
|
||||
// Schedule the operation if the BulkWriter has fewer than the maximum
|
||||
// number of allowed pending operations, or add the operation to the
|
||||
// buffer.
|
||||
if (this._pendingOpsCount < this._maxPendingOpCount) {
|
||||
this._pendingOpsCount++;
|
||||
this._sendFn(enqueueOnBatchCallback, bulkWriterOp);
|
||||
}
|
||||
else {
|
||||
this._bufferedOperations.push(new BufferedOperation(bulkWriterOp, () => {
|
||||
this._pendingOpsCount++;
|
||||
this._sendFn(enqueueOnBatchCallback, bulkWriterOp);
|
||||
}));
|
||||
}
|
||||
// Chain the BulkWriter operation promise with the buffer processing logic
|
||||
// in order to ensure that it runs and that subsequent operations are
|
||||
// enqueued before the next batch is scheduled in `_sendBatch()`.
|
||||
return userPromise
|
||||
.then(res => {
|
||||
this._pendingOpsCount--;
|
||||
this._processBufferedOps();
|
||||
return res;
|
||||
})
|
||||
.catch(err => {
|
||||
this._pendingOpsCount--;
|
||||
this._processBufferedOps();
|
||||
throw err;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Manages the pending operation counter and schedules the next BulkWriter
|
||||
* operation if we're under the maximum limit.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_processBufferedOps() {
|
||||
if (this._pendingOpsCount < this._maxPendingOpCount &&
|
||||
this._bufferedOperations.length > 0) {
|
||||
const nextOp = this._bufferedOperations.shift();
|
||||
nextOp.sendFn();
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Schedules the provided operations on current BulkCommitBatch.
|
||||
* Sends the BulkCommitBatch if it reaches maximum capacity.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_sendFn(enqueueOnBatchCallback, op) {
|
||||
// A backoff duration greater than 0 implies that this batch is a retry.
|
||||
// Retried writes are sent with a batch size of 10 in order to guarantee
|
||||
// that the batch is under the 10MiB limit.
|
||||
if (op.backoffDuration > 0) {
|
||||
if (this._bulkCommitBatch.pendingOps.length >= exports.RETRY_MAX_BATCH_SIZE) {
|
||||
this._scheduleCurrentBatch(/* flush= */ false);
|
||||
}
|
||||
this._bulkCommitBatch.setMaxBatchSize(exports.RETRY_MAX_BATCH_SIZE);
|
||||
}
|
||||
if (this._bulkCommitBatch.has(op.ref)) {
|
||||
// Create a new batch since the backend doesn't support batches with two
|
||||
// writes to the same document.
|
||||
this._scheduleCurrentBatch();
|
||||
}
|
||||
enqueueOnBatchCallback(this._bulkCommitBatch);
|
||||
this._bulkCommitBatch.processLastOperation(op);
|
||||
if (this._bulkCommitBatch._opCount === this._bulkCommitBatch.maxBatchSize) {
|
||||
this._scheduleCurrentBatch();
|
||||
}
|
||||
else if (op.flushed) {
|
||||
// If flush() was called before this operation was enqueued into a batch,
|
||||
// we still need to schedule it.
|
||||
this._scheduleCurrentBatch(/* flush= */ true);
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.BulkWriter = BulkWriter;
|
||||
/**
|
||||
* Validates the use of 'value' as BulkWriterOptions.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param value The BulkWriterOptions object to validate.
|
||||
* @throws if the input is not a valid BulkWriterOptions object.
|
||||
*/
|
||||
function validateBulkWriterOptions(value) {
|
||||
if ((0, validate_1.validateOptional)(value, { optional: true })) {
|
||||
return;
|
||||
}
|
||||
const argName = 'options';
|
||||
if (!(0, util_1.isObject)(value)) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(argName, 'bulkWriter() options argument')} Input is not an object.`);
|
||||
}
|
||||
const options = value;
|
||||
if (options.throttling === undefined ||
|
||||
typeof options.throttling === 'boolean') {
|
||||
return;
|
||||
}
|
||||
if (options.throttling.initialOpsPerSecond !== undefined) {
|
||||
(0, validate_1.validateInteger)('initialOpsPerSecond', options.throttling.initialOpsPerSecond, {
|
||||
minValue: 1,
|
||||
});
|
||||
}
|
||||
if (options.throttling.maxOpsPerSecond !== undefined) {
|
||||
(0, validate_1.validateInteger)('maxOpsPerSecond', options.throttling.maxOpsPerSecond, {
|
||||
minValue: 1,
|
||||
});
|
||||
if (options.throttling.initialOpsPerSecond !== undefined &&
|
||||
options.throttling.initialOpsPerSecond >
|
||||
options.throttling.maxOpsPerSecond) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(argName, 'bulkWriter() options argument')} "maxOpsPerSecond" cannot be less than "initialOpsPerSecond".`);
|
||||
}
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=bulk-writer.js.map
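The implementation above enforces the 500/50/5 ramp-up through the rate limiter and buffers writes once 500 operations are pending. As a short, hedged sketch (not part of this vendored file; the collection name and chosen rates are placeholders), this is how the throttling options validated by `validateBulkWriterOptions()` and the `flush()`/`close()` lifecycle are typically exercised:

```
// Sketch only: `events` and the chosen rates are illustrative.
import {Firestore} from '@google-cloud/firestore';

async function streamWrites(records: Array<{id: string; payload: object}>): Promise<void> {
  const firestore = new Firestore();
  const writer = firestore.bulkWriter({
    // Start below the default 500 ops/s and let the limiter ramp up,
    // but never exceed 1000 ops/s.
    throttling: {initialOpsPerSecond: 100, maxOpsPerSecond: 1000},
  });

  let sinceLastFlush = 0;
  for (const record of records) {
    writer
      .set(firestore.doc(`events/${record.id}`), record.payload)
      .catch(err => console.error(`write for ${record.id} failed:`, err));

    if (++sinceLastFlush === 500) {
      // flush() only waits for writes enqueued so far; writes added later
      // need another flush() or the final close().
      await writer.flush();
      sinceLastFlush = 0;
    }
  }

  await writer.close();
}
```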
|
||||
24
server/node_modules/@google-cloud/firestore/build/src/bundle.d.ts
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
import { DocumentSnapshot } from './document';
|
||||
import { QuerySnapshot } from './reference/query-snapshot';
|
||||
/**
|
||||
* Builds a Firestore data bundle with results from the given document and query snapshots.
|
||||
*/
|
||||
export declare class BundleBuilder {
|
||||
readonly bundleId: string;
|
||||
private documents;
|
||||
private namedQueries;
|
||||
private latestReadTime;
|
||||
constructor(bundleId: string);
|
||||
add(documentSnapshot: DocumentSnapshot): BundleBuilder;
|
||||
add(queryName: string, querySnapshot: QuerySnapshot): BundleBuilder;
|
||||
private addBundledDocument;
|
||||
private addNamedQuery;
|
||||
/**
|
||||
     * Converts an IBundleElement to a Buffer whose content is the length-prefixed JSON representation
|
||||
* of the element.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private elementToLengthPrefixedBuffer;
|
||||
build(): Buffer;
|
||||
}
|
||||
207
server/node_modules/@google-cloud/firestore/build/src/bundle.js
generated
vendored
Normal file
@@ -0,0 +1,207 @@
|
||||
"use strict";
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.BundleBuilder = void 0;
|
||||
const document_1 = require("./document");
|
||||
const query_snapshot_1 = require("./reference/query-snapshot");
|
||||
const timestamp_1 = require("./timestamp");
|
||||
const validate_1 = require("./validate");
|
||||
const BUNDLE_VERSION = 1;
|
||||
/**
|
||||
* Builds a Firestore data bundle with results from the given document and query snapshots.
|
||||
*/
|
||||
class BundleBuilder {
|
||||
constructor(bundleId) {
|
||||
this.bundleId = bundleId;
|
||||
// Resulting documents for the bundle, keyed by full document path.
|
||||
this.documents = new Map();
|
||||
// Named queries saved in the bundle, keyed by query name.
|
||||
this.namedQueries = new Map();
|
||||
// The latest read time among all bundled documents and queries.
|
||||
this.latestReadTime = new timestamp_1.Timestamp(0, 0);
|
||||
}
|
||||
/**
|
||||
* Adds a Firestore document snapshot or query snapshot to the bundle.
|
||||
     * Both the document's data and the query read time will be included in the bundle.
|
||||
*
|
||||
* @param {DocumentSnapshot | string} documentOrName A document snapshot to add or a name of a query.
|
||||
     * @param {QuerySnapshot=} querySnapshot A query snapshot to add to the bundle, if provided.
|
||||
* @returns {BundleBuilder} This instance.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const bundle = firestore.bundle('data-bundle');
|
||||
* const docSnapshot = await firestore.doc('abc/123').get();
|
||||
* const querySnapshot = await firestore.collection('coll').get();
|
||||
*
|
||||
* const bundleBuffer = bundle.add(docSnapshot) // Add a document
|
||||
* .add('coll-query', querySnapshot) // Add a named query.
|
||||
* .build()
|
||||
* // Save `bundleBuffer` to CDN or stream it to clients.
|
||||
* ```
|
||||
*/
|
||||
add(documentOrName, querySnapshot) {
|
||||
// eslint-disable-next-line prefer-rest-params
|
||||
(0, validate_1.validateMinNumberOfArguments)('BundleBuilder.add', arguments, 1);
|
||||
// eslint-disable-next-line prefer-rest-params
|
||||
(0, validate_1.validateMaxNumberOfArguments)('BundleBuilder.add', arguments, 2);
|
||||
if (arguments.length === 1) {
|
||||
validateDocumentSnapshot('documentOrName', documentOrName);
|
||||
this.addBundledDocument(documentOrName);
|
||||
}
|
||||
else {
|
||||
(0, validate_1.validateString)('documentOrName', documentOrName);
|
||||
validateQuerySnapshot('querySnapshot', querySnapshot);
|
||||
this.addNamedQuery(documentOrName, querySnapshot);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
addBundledDocument(snap, queryName) {
|
||||
const originalDocument = this.documents.get(snap.ref.path);
|
||||
const originalQueries = originalDocument === null || originalDocument === void 0 ? void 0 : originalDocument.metadata.queries;
|
||||
// Update with document built from `snap` because it is newer.
|
||||
if (!originalDocument ||
|
||||
timestamp_1.Timestamp.fromProto(originalDocument.metadata.readTime) < snap.readTime) {
|
||||
const docProto = snap.toDocumentProto();
|
||||
this.documents.set(snap.ref.path, {
|
||||
document: snap.exists ? docProto : undefined,
|
||||
metadata: {
|
||||
name: docProto.name,
|
||||
readTime: snap.readTime.toProto().timestampValue,
|
||||
exists: snap.exists,
|
||||
},
|
||||
});
|
||||
}
|
||||
// Update `queries` to include both original and `queryName`.
|
||||
const newDocument = this.documents.get(snap.ref.path);
|
||||
newDocument.metadata.queries = originalQueries || [];
|
||||
if (queryName) {
|
||||
newDocument.metadata.queries.push(queryName);
|
||||
}
|
||||
if (snap.readTime > this.latestReadTime) {
|
||||
this.latestReadTime = snap.readTime;
|
||||
}
|
||||
}
|
||||
addNamedQuery(name, querySnap) {
|
||||
if (this.namedQueries.has(name)) {
|
||||
throw new Error(`Query name conflict: ${name} has already been added.`);
|
||||
}
|
||||
this.namedQueries.set(name, {
|
||||
name,
|
||||
bundledQuery: querySnap.query._toBundledQuery(),
|
||||
readTime: querySnap.readTime.toProto().timestampValue,
|
||||
});
|
||||
for (const snap of querySnap.docs) {
|
||||
this.addBundledDocument(snap, name);
|
||||
}
|
||||
if (querySnap.readTime > this.latestReadTime) {
|
||||
this.latestReadTime = querySnap.readTime;
|
||||
}
|
||||
}
|
||||
/**
|
||||
     * Converts an IBundleElement to a Buffer whose content is the length-prefixed JSON representation
|
||||
* of the element.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
elementToLengthPrefixedBuffer(bundleElement) {
|
||||
// Convert to a valid proto message object then take its JSON representation.
|
||||
        // This takes care of details like converting internal byte array fields
|
||||
// to Base64 encodings.
|
||||
// We lazy-load the Proto file to reduce cold-start times.
|
||||
const message = require('../protos/firestore_v1_proto_api')
|
||||
.firestore.BundleElement.fromObject(bundleElement)
|
||||
.toJSON();
|
||||
const buffer = Buffer.from(JSON.stringify(message), 'utf-8');
|
||||
const lengthBuffer = Buffer.from(buffer.length.toString());
|
||||
return Buffer.concat([lengthBuffer, buffer]);
|
||||
}
|
||||
build() {
|
||||
let bundleBuffer = Buffer.alloc(0);
|
||||
for (const namedQuery of this.namedQueries.values()) {
|
||||
bundleBuffer = Buffer.concat([
|
||||
bundleBuffer,
|
||||
this.elementToLengthPrefixedBuffer({ namedQuery }),
|
||||
]);
|
||||
}
|
||||
for (const bundledDocument of this.documents.values()) {
|
||||
const documentMetadata = bundledDocument.metadata;
|
||||
bundleBuffer = Buffer.concat([
|
||||
bundleBuffer,
|
||||
this.elementToLengthPrefixedBuffer({ documentMetadata }),
|
||||
]);
|
||||
// Write to the bundle if document exists.
|
||||
const document = bundledDocument.document;
|
||||
if (document) {
|
||||
bundleBuffer = Buffer.concat([
|
||||
bundleBuffer,
|
||||
this.elementToLengthPrefixedBuffer({ document }),
|
||||
]);
|
||||
}
|
||||
}
|
||||
const metadata = {
|
||||
id: this.bundleId,
|
||||
createTime: this.latestReadTime.toProto().timestampValue,
|
||||
version: BUNDLE_VERSION,
|
||||
totalDocuments: this.documents.size,
|
||||
totalBytes: bundleBuffer.length,
|
||||
};
|
||||
// Prepends the metadata element to the bundleBuffer: `bundleBuffer` is the second argument to `Buffer.concat`.
|
||||
bundleBuffer = Buffer.concat([
|
||||
this.elementToLengthPrefixedBuffer({ metadata }),
|
||||
bundleBuffer,
|
||||
]);
|
||||
return bundleBuffer;
|
||||
}
|
||||
}
|
||||
exports.BundleBuilder = BundleBuilder;
|
||||
/**
|
||||
 * Convenience class that holds both the metadata and the actual content of a document to be bundled.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class BundledDocument {
|
||||
constructor(metadata, document) {
|
||||
this.metadata = metadata;
|
||||
this.document = document;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Validates that 'value' is DocumentSnapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param value The input to validate.
|
||||
*/
|
||||
function validateDocumentSnapshot(arg, value) {
|
||||
if (!(value instanceof document_1.DocumentSnapshot)) {
|
||||
throw new Error((0, validate_1.invalidArgumentMessage)(arg, 'DocumentSnapshot'));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Validates that 'value' is QuerySnapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param value The input to validate.
|
||||
*/
|
||||
function validateQuerySnapshot(arg, value) {
|
||||
if (!(value instanceof query_snapshot_1.QuerySnapshot)) {
|
||||
throw new Error((0, validate_1.invalidArgumentMessage)(arg, 'QuerySnapshot'));
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=bundle.js.map
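A hedged end-to-end sketch of the `BundleBuilder` flow implemented above (the bundle ID, collection names, field names, and output path are placeholders): it mirrors the JSDoc example for `add()` and additionally persists the length-prefixed buffer returned by `build()`.

```
// Sketch only: IDs, paths, and field names are illustrative.
import {writeFile} from 'node:fs/promises';
import {Firestore} from '@google-cloud/firestore';

async function exportBundle(): Promise<void> {
  const firestore = new Firestore();
  const bundle = firestore.bundle('latest-stories');

  const configSnap = await firestore.doc('config/featured').get();
  const storiesSnap = await firestore
    .collection('stories')
    .orderBy('publishedAt', 'desc')
    .limit(10)
    .get();

  const buffer = bundle
    .add(configSnap)                    // a single document
    .add('latest-stories', storiesSnap) // a named query
    .build();

  // Serve the file from a CDN or static hosting; client SDKs can then load
  // it instead of issuing the queries themselves.
  await writeFile('latest-stories.bundle', buffer);
}
```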
|
||||
90
server/node_modules/@google-cloud/firestore/build/src/collection-group.d.ts
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { QueryPartition } from './query-partition';
|
||||
import { Query } from './reference/query';
|
||||
import { Firestore } from './index';
|
||||
/**
|
||||
* A `CollectionGroup` refers to all documents that are contained in a
|
||||
* collection or subcollection with a specific collection ID.
|
||||
*
|
||||
* @class CollectionGroup
|
||||
*/
|
||||
export declare class CollectionGroup<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> extends Query<AppModelType, DbModelType> implements firestore.CollectionGroup<AppModelType, DbModelType> {
|
||||
/** @private */
|
||||
constructor(firestore: Firestore, collectionId: string, converter: firestore.FirestoreDataConverter<AppModelType, DbModelType> | undefined);
|
||||
/**
|
||||
* Partitions a query by returning partition cursors that can be used to run
|
||||
* the query in parallel. The returned cursors are split points that can be
|
||||
* used as starting and end points for individual query invocations.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* const partitionedQuery = partition.toQuery();
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @param {number} desiredPartitionCount The desired maximum number of
|
||||
* partition points. The number must be strictly positive. The actual number
|
||||
* of partitions returned may be fewer.
|
||||
* @return {AsyncIterable<QueryPartition>} An AsyncIterable of
|
||||
* `QueryPartition`s.
|
||||
*/
|
||||
getPartitions(desiredPartitionCount: number): AsyncIterable<QueryPartition<AppModelType, DbModelType>>;
|
||||
/**
|
||||
* Applies a custom data converter to this `CollectionGroup`, allowing you
|
||||
* to use your own custom model objects with Firestore. When you call get()
|
||||
* on the returned `CollectionGroup`, the provided converter will convert
|
||||
* between Firestore data of type `NewDbModelType` and your custom type
|
||||
* `NewAppModelType`.
|
||||
*
|
||||
* Using the converter allows you to specify generic type arguments when
|
||||
* storing and retrieving objects from Firestore.
|
||||
*
|
||||
* Passing in `null` as the converter parameter removes the current
|
||||
* converter.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* class Post {
|
||||
* constructor(readonly title: string, readonly author: string) {}
|
||||
*
|
||||
* toString(): string {
|
||||
* return this.title + ', by ' + this.author;
|
||||
* }
|
||||
* }
|
||||
*
|
||||
* const postConverter = {
|
||||
* toFirestore(post: Post): FirebaseFirestore.DocumentData {
|
||||
* return {title: post.title, author: post.author};
|
||||
* },
|
||||
* fromFirestore(
|
||||
* snapshot: FirebaseFirestore.QueryDocumentSnapshot
|
||||
* ): Post {
|
||||
* const data = snapshot.data();
|
||||
* return new Post(data.title, data.author);
|
||||
* }
|
||||
* };
|
||||
*
|
||||
* const querySnapshot = await Firestore()
|
||||
* .collectionGroup('posts')
|
||||
* .withConverter(postConverter)
|
||||
* .get();
|
||||
* for (const doc of querySnapshot.docs) {
|
||||
* const post = doc.data();
|
||||
* post.title; // string
|
||||
* post.toString(); // Should be defined
|
||||
* post.someNonExistentProperty; // TS error
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @param {FirestoreDataConverter | null} converter Converts objects to and
|
||||
* from Firestore. Passing in `null` removes the current converter.
|
||||
* @return {CollectionGroup} A `CollectionGroup` that uses the provided
|
||||
* converter.
|
||||
*/
|
||||
withConverter(converter: null): CollectionGroup;
|
||||
withConverter<NewAppModelType, NewDbModelType extends firestore.DocumentData = firestore.DocumentData>(converter: firestore.FirestoreDataConverter<NewAppModelType, NewDbModelType>): CollectionGroup<NewAppModelType, NewDbModelType>;
|
||||
}
|
||||
99
server/node_modules/@google-cloud/firestore/build/src/collection-group.js
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
"use strict";
|
||||
/*
|
||||
* Copyright 2020 Google LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CollectionGroup = void 0;
|
||||
const query_partition_1 = require("./query-partition");
|
||||
const util_1 = require("./util");
|
||||
const logger_1 = require("./logger");
|
||||
const query_1 = require("./reference/query");
|
||||
const query_options_1 = require("./reference/query-options");
|
||||
const path_1 = require("./path");
|
||||
const validate_1 = require("./validate");
|
||||
const types_1 = require("./types");
|
||||
const order_1 = require("./order");
|
||||
const trace_util_1 = require("./telemetry/trace-util");
|
||||
/**
|
||||
* A `CollectionGroup` refers to all documents that are contained in a
|
||||
* collection or subcollection with a specific collection ID.
|
||||
*
|
||||
* @class CollectionGroup
|
||||
*/
|
||||
class CollectionGroup extends query_1.Query {
|
||||
/** @private */
|
||||
constructor(firestore, collectionId, converter) {
|
||||
super(firestore, query_options_1.QueryOptions.forCollectionGroupQuery(collectionId, converter));
|
||||
}
|
||||
/**
|
||||
* Partitions a query by returning partition cursors that can be used to run
|
||||
* the query in parallel. The returned cursors are split points that can be
|
||||
* used as starting and end points for individual query invocations.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* const partitionedQuery = partition.toQuery();
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @param {number} desiredPartitionCount The desired maximum number of
|
||||
* partition points. The number must be strictly positive. The actual number
|
||||
* of partitions returned may be fewer.
|
||||
* @return {AsyncIterable<QueryPartition>} An AsyncIterable of
|
||||
* `QueryPartition`s.
|
||||
*/
|
||||
async *getPartitions(desiredPartitionCount) {
|
||||
const partitions = [];
|
||||
await this._firestore._traceUtil.startActiveSpan(trace_util_1.SPAN_NAME_PARTITION_QUERY, async () => {
|
||||
var _a;
|
||||
(0, validate_1.validateInteger)('desiredPartitionCount', desiredPartitionCount, {
|
||||
minValue: 1,
|
||||
});
|
||||
const tag = (0, util_1.requestTag)();
|
||||
await this.firestore.initializeIfNeeded(tag);
|
||||
if (desiredPartitionCount > 1) {
|
||||
// Partition queries require explicit ordering by __name__.
|
||||
const queryWithDefaultOrder = this.orderBy(path_1.FieldPath.documentId());
|
||||
const request = queryWithDefaultOrder.toProto();
|
||||
// Since we are always returning an extra partition (with an empty endBefore
|
||||
// cursor), we reduce the desired partition count by one.
|
||||
request.partitionCount = desiredPartitionCount - 1;
|
||||
const stream = await this.firestore.requestStream('partitionQueryStream',
|
||||
/* bidirectional= */ false, request, tag);
|
||||
stream.resume();
|
||||
for await (const currentCursor of stream) {
|
||||
partitions.push((_a = currentCursor.values) !== null && _a !== void 0 ? _a : []);
|
||||
}
|
||||
}
|
||||
(0, logger_1.logger)('Firestore.getPartitions', tag, 'Received %d partitions', partitions.length);
|
||||
// Sort the partitions as they may not be ordered if responses are paged.
|
||||
partitions.sort((l, r) => (0, order_1.compareArrays)(l, r));
|
||||
});
|
||||
for (let i = 0; i < partitions.length; ++i) {
|
||||
yield new query_partition_1.QueryPartition(this._firestore, this._queryOptions.collectionId, this._queryOptions.converter, i > 0 ? partitions[i - 1] : undefined, partitions[i]);
|
||||
}
|
||||
// Return the extra partition with the empty cursor.
|
||||
yield new query_partition_1.QueryPartition(this._firestore, this._queryOptions.collectionId, this._queryOptions.converter, partitions.pop(), undefined);
|
||||
}
|
||||
withConverter(converter) {
|
||||
return new CollectionGroup(this.firestore, this._queryOptions.collectionId, converter !== null && converter !== void 0 ? converter : (0, types_1.defaultConverter)());
|
||||
}
|
||||
}
|
||||
exports.CollectionGroup = CollectionGroup;
|
||||
//# sourceMappingURL=collection-group.js.map
|
||||
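A sketch of how `getPartitions()` above can fan a large collection group query out to parallel reads (not part of the vendored file; the 'events' collection group is an assumed example).

const { Firestore } = require('@google-cloud/firestore');

const firestore = new Firestore();

async function readEventsInParallel() {
  const group = firestore.collectionGroup('events');
  const reads = [];
  for await (const partition of group.getPartitions(8)) {
    // Each partition.toQuery() is bounded by the partition's cursors and can
    // be executed independently (for example on a separate worker).
    reads.push(partition.toQuery().get());
  }
  const snapshots = await Promise.all(reads);
  const total = snapshots.reduce((sum, snap) => sum + snap.size, 0);
  console.log(`Read ${total} documents across ${snapshots.length} partitions`);
}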
81
server/node_modules/@google-cloud/firestore/build/src/convert.d.ts
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
/*!
|
||||
* Copyright 2019 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { ApiMapValue, ProtobufJsValue } from './types';
|
||||
import api = google.firestore.v1;
|
||||
/*!
|
||||
* @module firestore/convert
|
||||
* @private
|
||||
* @internal
|
||||
*
|
||||
* This module contains utility functions to convert
|
||||
* `firestore.v1.Documents` from Proto3 JSON to their equivalent
|
||||
* representation in Protobuf JS. Protobuf JS is the only encoding supported by
|
||||
* this client, and dependencies that use Proto3 JSON (such as the Google Cloud
|
||||
* Functions SDK) are supported through this conversion and its usage in
|
||||
* {@see Firestore#snapshot_}.
|
||||
*/
|
||||
/**
|
||||
* Converts an ISO 8601 or google.protobuf.Timestamp proto into Protobuf JS.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param timestampValue The value to convert.
|
||||
* @param argumentName The argument name to use in the error message if the
|
||||
* conversion fails. If omitted, 'timestampValue' is used.
|
||||
* @return The value as expected by Protobuf JS or undefined if no input was
|
||||
* provided.
|
||||
*/
|
||||
export declare function timestampFromJson(timestampValue?: string | google.protobuf.ITimestamp, argumentName?: string): google.protobuf.ITimestamp | undefined;
|
||||
/**
|
||||
* Detects 'valueType' from a Proto3 JSON `firestore.v1.Value` proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param proto The `firestore.v1.Value` proto.
|
||||
* @return The string value for 'valueType'.
|
||||
*/
|
||||
export declare function detectValueType(proto: ProtobufJsValue): string;
|
||||
/**
|
||||
* Detects the value kind from a Proto3 JSON `google.protobuf.Value` proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param proto The `firestore.v1.Value` proto.
|
||||
* @return The string value for 'valueType'.
|
||||
*/
|
||||
export declare function detectGoogleProtobufValueType(proto: google.protobuf.IValue): string;
|
||||
/**
|
||||
* Converts a `firestore.v1.Value` in Proto3 JSON encoding into the
|
||||
* Protobuf JS format expected by this client.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldValue The `firestore.v1.Value` in Proto3 JSON format.
|
||||
* @return The `firestore.v1.Value` in Protobuf JS format.
|
||||
*/
|
||||
export declare function valueFromJson(fieldValue: api.IValue): api.IValue;
|
||||
/**
|
||||
* Converts a map of IValues in Proto3 JSON encoding into the Protobuf JS format
|
||||
* expected by this client. This conversion creates a copy of the underlying
|
||||
* fields.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param document An object with IValues in Proto3 JSON format.
|
||||
* @return The object in Protobuf JS format.
|
||||
*/
|
||||
export declare function fieldsFromJson(document: ApiMapValue): ApiMapValue;
|
||||
267
server/node_modules/@google-cloud/firestore/build/src/convert.js
generated
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2019 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.timestampFromJson = timestampFromJson;
|
||||
exports.detectValueType = detectValueType;
|
||||
exports.detectGoogleProtobufValueType = detectGoogleProtobufValueType;
|
||||
exports.valueFromJson = valueFromJson;
|
||||
exports.fieldsFromJson = fieldsFromJson;
|
||||
const validate_1 = require("./validate");
|
||||
const map_type_1 = require("./map-type");
|
||||
/*!
|
||||
* @module firestore/convert
|
||||
* @private
|
||||
* @internal
|
||||
*
|
||||
* This module contains utility functions to convert
|
||||
* `firestore.v1.Documents` from Proto3 JSON to their equivalent
|
||||
* representation in Protobuf JS. Protobuf JS is the only encoding supported by
|
||||
* this client, and dependencies that use Proto3 JSON (such as the Google Cloud
|
||||
* Functions SDK) are supported through this conversion and its usage in
|
||||
* {@see Firestore#snapshot_}.
|
||||
*/
|
||||
/**
|
||||
* Converts an ISO 8601 or google.protobuf.Timestamp proto into Protobuf JS.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param timestampValue The value to convert.
|
||||
* @param argumentName The argument name to use in the error message if the
|
||||
* conversion fails. If omitted, 'timestampValue' is used.
|
||||
* @return The value as expected by Protobuf JS or undefined if no input was
|
||||
* provided.
|
||||
*/
|
||||
function timestampFromJson(timestampValue, argumentName) {
|
||||
let timestampProto = {};
|
||||
if (typeof timestampValue === 'string') {
|
||||
const date = new Date(timestampValue);
|
||||
const seconds = Math.floor(date.getTime() / 1000);
|
||||
let nanos = 0;
|
||||
if (timestampValue.length > 20) {
|
||||
const nanoString = timestampValue.substring(20, timestampValue.length - 1);
|
||||
const trailingZeroes = 9 - nanoString.length;
|
||||
nanos = Number(nanoString) * Math.pow(10, trailingZeroes);
|
||||
}
|
||||
if (isNaN(seconds) || isNaN(nanos)) {
|
||||
argumentName = argumentName || 'timestampValue';
|
||||
throw new Error(`Specify a valid ISO 8601 timestamp for "${argumentName}".`);
|
||||
}
|
||||
timestampProto = {
|
||||
seconds: seconds || undefined,
|
||||
nanos: nanos || undefined,
|
||||
};
|
||||
}
|
||||
else if (timestampValue !== undefined) {
|
||||
(0, validate_1.validateObject)('timestampValue', timestampValue);
|
||||
timestampProto = {
|
||||
seconds: timestampValue.seconds || undefined,
|
||||
nanos: timestampValue.nanos || undefined,
|
||||
};
|
||||
}
|
||||
return timestampProto;
|
||||
}
|
||||
/**
|
||||
* Converts a Proto3 JSON 'bytesValue' field into Protobuf JS.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param bytesValue The value to convert.
|
||||
* @return The value as expected by Protobuf JS.
|
||||
*/
|
||||
function bytesFromJson(bytesValue) {
|
||||
if (typeof bytesValue === 'string') {
|
||||
return Buffer.from(bytesValue, 'base64');
|
||||
}
|
||||
else {
|
||||
return bytesValue;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Detects 'valueType' from a Proto3 JSON `firestore.v1.Value` proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param proto The `firestore.v1.Value` proto.
|
||||
* @return The string value for 'valueType'.
|
||||
*/
|
||||
function detectValueType(proto) {
|
||||
var _a;
|
||||
let valueType;
|
||||
if (proto.valueType) {
|
||||
valueType = proto.valueType;
|
||||
}
|
||||
else {
|
||||
const detectedValues = [];
|
||||
if (proto.stringValue !== undefined) {
|
||||
detectedValues.push('stringValue');
|
||||
}
|
||||
if (proto.booleanValue !== undefined) {
|
||||
detectedValues.push('booleanValue');
|
||||
}
|
||||
if (proto.integerValue !== undefined) {
|
||||
detectedValues.push('integerValue');
|
||||
}
|
||||
if (proto.doubleValue !== undefined) {
|
||||
detectedValues.push('doubleValue');
|
||||
}
|
||||
if (proto.timestampValue !== undefined) {
|
||||
detectedValues.push('timestampValue');
|
||||
}
|
||||
if (proto.referenceValue !== undefined) {
|
||||
detectedValues.push('referenceValue');
|
||||
}
|
||||
if (proto.arrayValue !== undefined) {
|
||||
detectedValues.push('arrayValue');
|
||||
}
|
||||
if (proto.nullValue !== undefined) {
|
||||
detectedValues.push('nullValue');
|
||||
}
|
||||
if (proto.mapValue !== undefined) {
|
||||
detectedValues.push('mapValue');
|
||||
}
|
||||
if (proto.geoPointValue !== undefined) {
|
||||
detectedValues.push('geoPointValue');
|
||||
}
|
||||
if (proto.bytesValue !== undefined) {
|
||||
detectedValues.push('bytesValue');
|
||||
}
|
||||
if (detectedValues.length !== 1) {
|
||||
throw new Error(`Unable to infer type value from '${JSON.stringify(proto)}'.`);
|
||||
}
|
||||
valueType = detectedValues[0];
|
||||
}
|
||||
// Special handling of mapValues used to represent other data types
|
||||
if (valueType === 'mapValue') {
|
||||
const fields = (_a = proto.mapValue) === null || _a === void 0 ? void 0 : _a.fields;
|
||||
if (fields) {
|
||||
const props = Object.keys(fields);
|
||||
if (props.indexOf(map_type_1.RESERVED_MAP_KEY) !== -1 &&
|
||||
detectValueType(fields[map_type_1.RESERVED_MAP_KEY]) === 'stringValue' &&
|
||||
fields[map_type_1.RESERVED_MAP_KEY].stringValue === map_type_1.RESERVED_MAP_KEY_VECTOR_VALUE) {
|
||||
valueType = 'vectorValue';
|
||||
}
|
||||
}
|
||||
}
|
||||
return valueType;
|
||||
}
|
||||
/**
|
||||
* Detects the value kind from a Proto3 JSON `google.protobuf.Value` proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param proto The `firestore.v1.Value` proto.
|
||||
* @return The string value for 'valueType'.
|
||||
*/
|
||||
function detectGoogleProtobufValueType(proto) {
|
||||
const detectedValues = [];
|
||||
if (proto.nullValue !== undefined) {
|
||||
detectedValues.push('nullValue');
|
||||
}
|
||||
if (proto.numberValue !== undefined) {
|
||||
detectedValues.push('numberValue');
|
||||
}
|
||||
if (proto.stringValue !== undefined) {
|
||||
detectedValues.push('stringValue');
|
||||
}
|
||||
if (proto.boolValue !== undefined) {
|
||||
detectedValues.push('boolValue');
|
||||
}
|
||||
if (proto.structValue !== undefined) {
|
||||
detectedValues.push('structValue');
|
||||
}
|
||||
if (proto.listValue !== undefined) {
|
||||
detectedValues.push('listValue');
|
||||
}
|
||||
if (detectedValues.length !== 1) {
|
||||
throw new Error(`Unable to infer type value from '${JSON.stringify(proto)}'.`);
|
||||
}
|
||||
return detectedValues[0];
|
||||
}
|
||||
/**
|
||||
* Converts a `firestore.v1.Value` in Proto3 JSON encoding into the
|
||||
* Protobuf JS format expected by this client.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldValue The `firestore.v1.Value` in Proto3 JSON format.
|
||||
* @return The `firestore.v1.Value` in Protobuf JS format.
|
||||
*/
|
||||
function valueFromJson(fieldValue) {
|
||||
const valueType = detectValueType(fieldValue);
|
||||
switch (valueType) {
|
||||
case 'timestampValue':
|
||||
return {
|
||||
timestampValue: timestampFromJson(fieldValue.timestampValue),
|
||||
};
|
||||
case 'bytesValue':
|
||||
return {
|
||||
bytesValue: bytesFromJson(fieldValue.bytesValue),
|
||||
};
|
||||
case 'doubleValue':
|
||||
return {
|
||||
doubleValue: Number(fieldValue.doubleValue),
|
||||
};
|
||||
case 'arrayValue': {
|
||||
const arrayValue = [];
|
||||
if (Array.isArray(fieldValue.arrayValue.values)) {
|
||||
for (const value of fieldValue.arrayValue.values) {
|
||||
arrayValue.push(valueFromJson(value));
|
||||
}
|
||||
}
|
||||
return {
|
||||
arrayValue: {
|
||||
values: arrayValue,
|
||||
},
|
||||
};
|
||||
}
|
||||
case 'mapValue':
|
||||
case 'vectorValue': {
|
||||
const mapValue = {};
|
||||
const fields = fieldValue.mapValue.fields;
|
||||
if (fields) {
|
||||
for (const prop of Object.keys(fields)) {
|
||||
mapValue[prop] = valueFromJson(fieldValue.mapValue.fields[prop]);
|
||||
}
|
||||
}
|
||||
return {
|
||||
mapValue: {
|
||||
fields: mapValue,
|
||||
},
|
||||
};
|
||||
}
|
||||
default:
|
||||
return fieldValue;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Converts a map of IValues in Proto3 JSON encoding into the Protobuf JS format
|
||||
* expected by this client. This conversion creates a copy of the underlying
|
||||
* fields.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param document An object with IValues in Proto3 JSON format.
|
||||
* @return The object in Protobuf JS format.
|
||||
*/
|
||||
function fieldsFromJson(document) {
|
||||
const result = {};
|
||||
for (const prop of Object.keys(document)) {
|
||||
result[prop] = valueFromJson(document[prop]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
//# sourceMappingURL=convert.js.map
|
||||
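A standalone sketch of the ISO 8601 branch of `timestampFromJson()` above (the convert module itself is private to the client): the fractional digits after character 20 of the RFC 3339 string are right-padded out to nanoseconds.

function timestampFromIsoString(value) {
  const date = new Date(value);
  const seconds = Math.floor(date.getTime() / 1000);
  let nanos = 0;
  if (value.length > 20) {
    // Everything between the '.' and the trailing 'Z' is the fractional part.
    const nanoString = value.substring(20, value.length - 1);
    nanos = Number(nanoString) * Math.pow(10, 9 - nanoString.length);
  }
  return { seconds, nanos };
}

// '2020-01-01T00:00:00.123Z' -> { seconds: 1577836800, nanos: 123000000 }
console.log(timestampFromIsoString('2020-01-01T00:00:00.123Z'));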
155
server/node_modules/@google-cloud/firestore/build/src/document-change.d.ts
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { QueryDocumentSnapshot } from './document';
|
||||
export type DocumentChangeType = 'added' | 'removed' | 'modified';
|
||||
/**
|
||||
* A DocumentChange represents a change to the documents matching a query.
|
||||
* It contains the document affected and the type of change that occurred.
|
||||
*
|
||||
* @class DocumentChange
|
||||
*/
|
||||
export declare class DocumentChange<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> implements firestore.DocumentChange<AppModelType, DbModelType> {
|
||||
private readonly _type;
|
||||
private readonly _document;
|
||||
private readonly _oldIndex;
|
||||
private readonly _newIndex;
|
||||
/**
|
||||
* @private
|
||||
*
|
||||
* @param {string} type 'added' | 'removed' | 'modified'.
|
||||
* @param {QueryDocumentSnapshot} document The document.
|
||||
* @param {number} oldIndex The index in the documents array prior to this
|
||||
* change.
|
||||
* @param {number} newIndex The index in the documents array after this
|
||||
* change.
|
||||
*/
|
||||
constructor(type: DocumentChangeType, document: QueryDocumentSnapshot<AppModelType, DbModelType>, oldIndex: number, newIndex: number);
|
||||
/**
|
||||
* The type of change ('added', 'modified', or 'removed').
|
||||
*
|
||||
* @type {string}
|
||||
* @name DocumentChange#type
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* console.log(`Type of change is ${change.type}`);
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get type(): DocumentChangeType;
|
||||
/**
|
||||
* The document affected by this change.
|
||||
*
|
||||
* @type {QueryDocumentSnapshot}
|
||||
* @name DocumentChange#doc
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* console.log(change.doc.data());
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get doc(): QueryDocumentSnapshot<AppModelType, DbModelType>;
|
||||
/**
|
||||
* The index of the changed document in the result set immediately prior to
|
||||
* this DocumentChange (i.e. supposing that all prior DocumentChange objects
|
||||
* have been applied). Is -1 for 'added' events.
|
||||
*
|
||||
* @type {number}
|
||||
* @name DocumentChange#oldIndex
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* if (change.oldIndex !== -1) {
|
||||
* docsArray.splice(change.oldIndex, 1);
|
||||
* }
|
||||
* if (change.newIndex !== -1) {
|
||||
* docsArray.splice(change.newIndex, 0, change.doc);
|
||||
* }
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get oldIndex(): number;
|
||||
/**
|
||||
* The index of the changed document in the result set immediately after
|
||||
* this DocumentChange (i.e. supposing that all prior DocumentChange
|
||||
* objects and the current DocumentChange object have been applied).
|
||||
* Is -1 for 'removed' events.
|
||||
*
|
||||
* @type {number}
|
||||
* @name DocumentChange#newIndex
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* if (change.oldIndex !== -1) {
|
||||
* docsArray.splice(change.oldIndex, 1);
|
||||
* }
|
||||
* if (change.newIndex !== -1) {
|
||||
* docsArray.splice(change.newIndex, 0, change.doc);
|
||||
* }
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get newIndex(): number;
|
||||
/**
|
||||
* Returns true if the data in this `DocumentChange` is equal to the provided
|
||||
* value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return true if this `DocumentChange` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other: firestore.DocumentChange<AppModelType, DbModelType>): boolean;
|
||||
}
|
||||
175
server/node_modules/@google-cloud/firestore/build/src/document-change.js
generated
vendored
Normal file
@@ -0,0 +1,175 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DocumentChange = void 0;
|
||||
/**
|
||||
* A DocumentChange represents a change to the documents matching a query.
|
||||
* It contains the document affected and the type of change that occurred.
|
||||
*
|
||||
* @class DocumentChange
|
||||
*/
|
||||
class DocumentChange {
|
||||
/**
|
||||
* @private
|
||||
*
|
||||
* @param {string} type 'added' | 'removed' | 'modified'.
|
||||
* @param {QueryDocumentSnapshot} document The document.
|
||||
* @param {number} oldIndex The index in the documents array prior to this
|
||||
* change.
|
||||
* @param {number} newIndex The index in the documents array after this
|
||||
* change.
|
||||
*/
|
||||
constructor(type, document, oldIndex, newIndex) {
|
||||
this._type = type;
|
||||
this._document = document;
|
||||
this._oldIndex = oldIndex;
|
||||
this._newIndex = newIndex;
|
||||
}
|
||||
/**
|
||||
* The type of change ('added', 'modified', or 'removed').
|
||||
*
|
||||
* @type {string}
|
||||
* @name DocumentChange#type
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* console.log(`Type of change is ${change.type}`);
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get type() {
|
||||
return this._type;
|
||||
}
|
||||
/**
|
||||
* The document affected by this change.
|
||||
*
|
||||
* @type {QueryDocumentSnapshot}
|
||||
* @name DocumentChange#doc
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* console.log(change.doc.data());
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get doc() {
|
||||
return this._document;
|
||||
}
|
||||
/**
|
||||
* The index of the changed document in the result set immediately prior to
|
||||
* this DocumentChange (i.e. supposing that all prior DocumentChange objects
|
||||
* have been applied). Is -1 for 'added' events.
|
||||
*
|
||||
* @type {number}
|
||||
* @name DocumentChange#oldIndex
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* if (change.oldIndex !== -1) {
|
||||
* docsArray.splice(change.oldIndex, 1);
|
||||
* }
|
||||
* if (change.newIndex !== -1) {
|
||||
* docsArray.splice(change.newIndex, 0, change.doc);
|
||||
* }
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get oldIndex() {
|
||||
return this._oldIndex;
|
||||
}
|
||||
/**
|
||||
* The index of the changed document in the result set immediately after
|
||||
* this DocumentChange (i.e. supposing that all prior DocumentChange
|
||||
* objects and the current DocumentChange object have been applied).
|
||||
* Is -1 for 'removed' events.
|
||||
*
|
||||
* @type {number}
|
||||
* @name DocumentChange#newIndex
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col').where('foo', '==', 'bar');
|
||||
* let docsArray = [];
|
||||
*
|
||||
* let unsubscribe = query.onSnapshot(querySnapshot => {
|
||||
* for (let change of querySnapshot.docChanges) {
|
||||
* if (change.oldIndex !== -1) {
|
||||
* docsArray.splice(change.oldIndex, 1);
|
||||
* }
|
||||
* if (change.newIndex !== -1) {
|
||||
* docsArray.splice(change.newIndex, 0, change.doc);
|
||||
* }
|
||||
* }
|
||||
* });
|
||||
*
|
||||
* // Remove this listener.
|
||||
* unsubscribe();
|
||||
* ```
|
||||
*/
|
||||
get newIndex() {
|
||||
return this._newIndex;
|
||||
}
|
||||
/**
|
||||
* Returns true if the data in this `DocumentChange` is equal to the provided
|
||||
* value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return true if this `DocumentChange` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other) {
|
||||
if (this === other) {
|
||||
return true;
|
||||
}
|
||||
return (other instanceof DocumentChange &&
|
||||
this._type === other._type &&
|
||||
this._oldIndex === other._oldIndex &&
|
||||
this._newIndex === other._newIndex &&
|
||||
this._document.isEqual(other._document));
|
||||
}
|
||||
}
|
||||
exports.DocumentChange = DocumentChange;
|
||||
//# sourceMappingURL=document-change.js.map
|
||||
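The oldIndex/newIndex examples above all follow the same reconciliation pattern; a compact standalone version (assuming `changes` comes from a query snapshot's `docChanges()`):

function applyDocChanges(docs, changes) {
  for (const change of changes) {
    if (change.oldIndex !== -1) {
      // 'removed' and 'modified' changes vacate the old position first.
      docs.splice(change.oldIndex, 1);
    }
    if (change.newIndex !== -1) {
      // 'added' and 'modified' changes occupy the new position.
      docs.splice(change.newIndex, 0, change.doc);
    }
  }
  return docs;
}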
74
server/node_modules/@google-cloud/firestore/build/src/document-reader.d.ts
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
/*!
|
||||
* Copyright 2021 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import { DocumentSnapshot } from './document';
|
||||
import { DocumentReference } from './reference/document-reference';
|
||||
import { FieldPath } from './path';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { Firestore } from './index';
|
||||
import { Timestamp } from './timestamp';
|
||||
import { DocumentData } from '@google-cloud/firestore';
|
||||
import api = google.firestore.v1;
|
||||
interface BatchGetResponse<AppModelType, DbModelType extends DocumentData> {
|
||||
result: Array<DocumentSnapshot<AppModelType, DbModelType>>;
|
||||
/**
|
||||
     * The transaction that was started as part of this request. Only set if
     * `DocumentReader.transactionOrReadTime` was `api.ITransactionOptions`.
|
||||
*/
|
||||
transaction?: Uint8Array;
|
||||
}
|
||||
/**
|
||||
 * A wrapper around BatchGetDocumentsRequest that retries the request upon stream
|
||||
* failure and returns ordered results.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class DocumentReader<AppModelType, DbModelType extends DocumentData> {
|
||||
private readonly firestore;
|
||||
private readonly allDocuments;
|
||||
private readonly fieldMask?;
|
||||
private readonly transactionOrReadTime?;
|
||||
private readonly outstandingDocuments;
|
||||
private readonly retrievedDocuments;
|
||||
private retrievedTransactionId?;
|
||||
/**
|
||||
* Creates a new DocumentReader that fetches the provided documents (via
|
||||
* `get()`).
|
||||
*
|
||||
* @param firestore The Firestore instance to use.
|
||||
* @param allDocuments The documents to get.
|
||||
* @param fieldMask An optional field mask to apply to this read
|
||||
* @param transactionOrReadTime An optional transaction ID to use for this
|
||||
* read or options for beginning a new transaction with this read
|
||||
*/
|
||||
constructor(firestore: Firestore, allDocuments: ReadonlyArray<DocumentReference<AppModelType, DbModelType>>, fieldMask?: FieldPath[] | undefined, transactionOrReadTime?: (Uint8Array | api.ITransactionOptions | Timestamp) | undefined);
|
||||
/**
|
||||
* Invokes the BatchGetDocuments RPC and returns the results as an array of
|
||||
* documents.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
*/
|
||||
get(requestTag: string): Promise<Array<DocumentSnapshot<AppModelType, DbModelType>>>;
|
||||
/**
|
||||
* Invokes the BatchGetDocuments RPC and returns the results with transaction
|
||||
* metadata.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
*/
|
||||
_get(requestTag: string): Promise<BatchGetResponse<AppModelType, DbModelType>>;
|
||||
private fetchDocuments;
|
||||
}
|
||||
export {};
|
||||
167
server/node_modules/@google-cloud/firestore/build/src/document-reader.js
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2021 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DocumentReader = void 0;
|
||||
const document_1 = require("./document");
|
||||
const util_1 = require("./util");
|
||||
const logger_1 = require("./logger");
|
||||
const timestamp_1 = require("./timestamp");
|
||||
/**
|
||||
 * A wrapper around BatchGetDocumentsRequest that retries the request upon stream
|
||||
* failure and returns ordered results.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class DocumentReader {
|
||||
/**
|
||||
* Creates a new DocumentReader that fetches the provided documents (via
|
||||
* `get()`).
|
||||
*
|
||||
* @param firestore The Firestore instance to use.
|
||||
* @param allDocuments The documents to get.
|
||||
* @param fieldMask An optional field mask to apply to this read
|
||||
* @param transactionOrReadTime An optional transaction ID to use for this
|
||||
* read or options for beginning a new transaction with this read
|
||||
*/
|
||||
constructor(firestore, allDocuments, fieldMask, transactionOrReadTime) {
|
||||
this.firestore = firestore;
|
||||
this.allDocuments = allDocuments;
|
||||
this.fieldMask = fieldMask;
|
||||
this.transactionOrReadTime = transactionOrReadTime;
|
||||
this.outstandingDocuments = new Set();
|
||||
this.retrievedDocuments = new Map();
|
||||
for (const docRef of this.allDocuments) {
|
||||
this.outstandingDocuments.add(docRef.formattedName);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Invokes the BatchGetDocuments RPC and returns the results as an array of
|
||||
* documents.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
*/
|
||||
async get(requestTag) {
|
||||
const { result } = await this._get(requestTag);
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Invokes the BatchGetDocuments RPC and returns the results with transaction
|
||||
* metadata.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
*/
|
||||
async _get(requestTag) {
|
||||
await this.fetchDocuments(requestTag);
|
||||
// BatchGetDocuments doesn't preserve document order. We use the request
|
||||
// order to sort the resulting documents.
|
||||
const orderedDocuments = [];
|
||||
for (const docRef of this.allDocuments) {
|
||||
const document = this.retrievedDocuments.get(docRef.formattedName);
|
||||
if (document !== undefined) {
|
||||
// Recreate the DocumentSnapshot with the DocumentReference
|
||||
// containing the original converter.
|
||||
const finalDoc = new document_1.DocumentSnapshotBuilder(docRef);
|
||||
finalDoc.fieldsProto = document._fieldsProto;
|
||||
finalDoc.readTime = document.readTime;
|
||||
finalDoc.createTime = document.createTime;
|
||||
finalDoc.updateTime = document.updateTime;
|
||||
orderedDocuments.push(finalDoc.build());
|
||||
}
|
||||
else {
|
||||
throw new Error(`Did not receive document for "${docRef.path}".`);
|
||||
}
|
||||
}
|
||||
return {
|
||||
result: orderedDocuments,
|
||||
transaction: this.retrievedTransactionId,
|
||||
};
|
||||
}
|
||||
async fetchDocuments(requestTag) {
|
||||
var _a;
|
||||
if (!this.outstandingDocuments.size) {
|
||||
return;
|
||||
}
|
||||
const request = {
|
||||
database: this.firestore.formattedName,
|
||||
documents: Array.from(this.outstandingDocuments),
|
||||
};
|
||||
if (this.transactionOrReadTime instanceof Uint8Array) {
|
||||
request.transaction = this.transactionOrReadTime;
|
||||
}
|
||||
else if (this.transactionOrReadTime instanceof timestamp_1.Timestamp) {
|
||||
request.readTime = this.transactionOrReadTime.toProto().timestampValue;
|
||||
}
|
||||
else if (this.transactionOrReadTime) {
|
||||
request.newTransaction = this.transactionOrReadTime;
|
||||
}
|
||||
if (this.fieldMask) {
|
||||
const fieldPaths = this.fieldMask.map(fieldPath => fieldPath.formattedName);
|
||||
request.mask = { fieldPaths };
|
||||
}
|
||||
let resultCount = 0;
|
||||
try {
|
||||
const stream = await this.firestore.requestStream('batchGetDocuments',
|
||||
/* bidirectional= */ false, request, requestTag);
|
||||
stream.resume();
|
||||
for await (const response of stream) {
|
||||
// Proto comes with zero-length buffer by default
|
||||
if ((_a = response.transaction) === null || _a === void 0 ? void 0 : _a.length) {
|
||||
this.retrievedTransactionId = response.transaction;
|
||||
}
|
||||
let snapshot;
|
||||
if (response.found) {
|
||||
(0, logger_1.logger)('DocumentReader.fetchDocuments', requestTag, 'Received document: %s', response.found.name);
|
||||
snapshot = this.firestore.snapshot_(response.found, response.readTime);
|
||||
}
|
||||
else if (response.missing) {
|
||||
(0, logger_1.logger)('DocumentReader.fetchDocuments', requestTag, 'Document missing: %s', response.missing);
|
||||
snapshot = this.firestore.snapshot_(response.missing, response.readTime);
|
||||
}
|
||||
if (snapshot) {
|
||||
const path = snapshot.ref.formattedName;
|
||||
this.outstandingDocuments.delete(path);
|
||||
this.retrievedDocuments.set(path, snapshot);
|
||||
++resultCount;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
const shouldRetry =
|
||||
// Transactional reads are retried via the transaction runner.
|
||||
!request.transaction &&
|
||||
!request.newTransaction &&
|
||||
// Only retry if we made progress.
|
||||
resultCount > 0 &&
|
||||
// Don't retry permanent errors.
|
||||
error.code !== undefined &&
|
||||
!(0, util_1.isPermanentRpcError)(error, 'batchGetDocuments');
|
||||
(0, logger_1.logger)('DocumentReader.fetchDocuments', requestTag, 'BatchGetDocuments failed with error: %s. Retrying: %s', error, shouldRetry);
|
||||
if (shouldRetry) {
|
||||
return this.fetchDocuments(requestTag);
|
||||
}
|
||||
else {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
finally {
|
||||
(0, logger_1.logger)('DocumentReader.fetchDocuments', requestTag, 'Received %d results', resultCount);
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.DocumentReader = DocumentReader;
|
||||
//# sourceMappingURL=document-reader.js.map
|
||||
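A sketch of the re-ordering step in `DocumentReader._get()` above: BatchGetDocuments responds in arbitrary order, so retrieved snapshots are mapped back onto the caller's request order (the `formattedName`/`path` shapes follow the code above; the Map argument is assumed).

function orderByRequest(requestedRefs, retrievedByName) {
  return requestedRefs.map(ref => {
    const snapshot = retrievedByName.get(ref.formattedName);
    if (snapshot === undefined) {
      throw new Error(`Did not receive document for "${ref.path}".`);
    }
    return snapshot;
  });
}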
594
server/node_modules/@google-cloud/firestore/build/src/document.d.ts
generated
vendored
Normal file
@@ -0,0 +1,594 @@
|
||||
/*!
|
||||
* Copyright 2019 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { FieldTransform } from './field-value';
|
||||
import { FieldPath } from './path';
|
||||
import { DocumentReference } from './reference/document-reference';
|
||||
import { Serializer } from './serializer';
|
||||
import { Timestamp } from './timestamp';
|
||||
import { ApiMapValue, UpdateMap } from './types';
|
||||
import api = google.firestore.v1;
|
||||
/**
|
||||
* Returns a builder for DocumentSnapshot and QueryDocumentSnapshot instances.
|
||||
 * Invoke `.build()` to assemble the final snapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class DocumentSnapshotBuilder<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> {
|
||||
readonly ref: DocumentReference<AppModelType, DbModelType>;
|
||||
/** The fields of the Firestore `Document` Protobuf backing this document. */
|
||||
fieldsProto?: ApiMapValue;
|
||||
/** The time when this document was read. */
|
||||
readTime?: Timestamp;
|
||||
/** The time when this document was created. */
|
||||
createTime?: Timestamp;
|
||||
/** The time when this document was last updated. */
|
||||
updateTime?: Timestamp;
|
||||
constructor(ref: DocumentReference<AppModelType, DbModelType>);
|
||||
/**
|
||||
* Builds the DocumentSnapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns Returns either a QueryDocumentSnapshot (if `fieldsProto` was
|
||||
* provided) or a DocumentSnapshot.
|
||||
*/
|
||||
build(): QueryDocumentSnapshot<AppModelType, DbModelType> | DocumentSnapshot<AppModelType, DbModelType>;
|
||||
}
|
||||
/**
|
||||
* A DocumentSnapshot is an immutable representation for a document in a
|
||||
* Firestore database. The data can be extracted with
|
||||
* [data()]{@link DocumentSnapshot#data} or
|
||||
* [get(fieldPath)]{@link DocumentSnapshot#get} to get a
|
||||
* specific field.
|
||||
*
|
||||
* <p>For a DocumentSnapshot that points to a non-existing document, any data
|
||||
* access will return 'undefined'. You can use the
|
||||
* [exists]{@link DocumentSnapshot#exists} property to explicitly verify a
|
||||
* document's existence.
|
||||
*
|
||||
* @class DocumentSnapshot
|
||||
*/
|
||||
export declare class DocumentSnapshot<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> implements firestore.DocumentSnapshot<AppModelType, DbModelType> {
|
||||
/**
|
||||
* @internal
|
||||
* @private
|
||||
**/
|
||||
readonly _fieldsProto?: ApiMapValue | undefined;
|
||||
private _ref;
|
||||
private _serializer;
|
||||
private _readTime;
|
||||
private _createTime;
|
||||
private _updateTime;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*
|
||||
* @param ref The reference to the document.
|
||||
* @param _fieldsProto The fields of the Firestore `Document` Protobuf backing
|
||||
* this document (or undefined if the document does not exist).
|
||||
* @param readTime The time when this snapshot was read (or undefined if
|
||||
* the document exists only locally).
|
||||
* @param createTime The time when the document was created (or undefined if
|
||||
* the document does not exist).
|
||||
* @param updateTime The time when the document was last updated (or undefined
|
||||
* if the document does not exist).
|
||||
*/
|
||||
constructor(ref: DocumentReference<AppModelType, DbModelType>,
|
||||
/**
|
||||
* @internal
|
||||
* @private
|
||||
**/
|
||||
_fieldsProto?: ApiMapValue | undefined, readTime?: Timestamp, createTime?: Timestamp, updateTime?: Timestamp);
|
||||
/**
|
||||
* Creates a DocumentSnapshot from an object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The reference to the document.
|
||||
* @param obj The object to store in the DocumentSnapshot.
|
||||
* @return The created DocumentSnapshot.
|
||||
*/
|
||||
static fromObject<AppModelType, DbModelType extends firestore.DocumentData>(ref: DocumentReference<AppModelType, DbModelType>, obj: firestore.DocumentData): DocumentSnapshot<AppModelType, DbModelType>;
|
||||
/**
|
||||
* Creates a DocumentSnapshot from an UpdateMap.
|
||||
*
|
||||
 * This method expands the top-level field paths in a JavaScript map and
|
||||
* turns { foo.bar : foobar } into { foo { bar : foobar }}
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The reference to the document.
|
||||
* @param data The field/value map to expand.
|
||||
* @return The created DocumentSnapshot.
|
||||
*/
|
||||
static fromUpdateMap<AppModelType, DbModelType extends firestore.DocumentData>(ref: firestore.DocumentReference<AppModelType, DbModelType>, data: UpdateMap): DocumentSnapshot<AppModelType, DbModelType>;
|
||||
/**
|
||||
* True if the document exists.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @name DocumentSnapshot#exists
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Data: ${JSON.stringify(documentSnapshot.data())}`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get exists(): boolean;
|
||||
/**
|
||||
* A [DocumentReference]{@link DocumentReference} for the document
|
||||
* stored in this snapshot.
|
||||
*
|
||||
* @type {DocumentReference}
|
||||
* @name DocumentSnapshot#ref
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Found document at '${documentSnapshot.ref.path}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get ref(): DocumentReference<AppModelType, DbModelType>;
|
||||
/**
|
||||
* The ID of the document for which this DocumentSnapshot contains data.
|
||||
*
|
||||
* @type {string}
|
||||
* @name DocumentSnapshot#id
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Document found with name '${documentSnapshot.id}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get id(): string;
|
||||
/**
|
||||
* The time the document was created. Undefined for documents that don't
|
||||
* exist.
|
||||
*
|
||||
* @type {Timestamp|undefined}
|
||||
* @name DocumentSnapshot#createTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* let createTime = documentSnapshot.createTime;
|
||||
* console.log(`Document created at '${createTime.toDate()}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get createTime(): Timestamp | undefined;
|
||||
/**
|
||||
* The time the document was last updated (at the time the snapshot was
|
||||
* generated). Undefined for documents that don't exist.
|
||||
*
|
||||
* @type {Timestamp|undefined}
|
||||
* @name DocumentSnapshot#updateTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* let updateTime = documentSnapshot.updateTime;
|
||||
* console.log(`Document updated at '${updateTime.toDate()}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get updateTime(): Timestamp | undefined;
|
||||
/**
|
||||
* The time this snapshot was read.
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name DocumentSnapshot#readTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* let readTime = documentSnapshot.readTime;
|
||||
* console.log(`Document read at '${readTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get readTime(): Timestamp;
|
||||
/**
|
||||
* Retrieves all fields in the document as an object. Returns 'undefined' if
|
||||
* the document doesn't exist.
|
||||
*
|
||||
* @returns {T|undefined} An object containing all fields in the document or
|
||||
* 'undefined' if the document doesn't exist.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* let data = documentSnapshot.data();
|
||||
* console.log(`Retrieved data: ${JSON.stringify(data)}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
data(): AppModelType | undefined;
|
||||
/**
|
||||
* Retrieves the field specified by `field`.
|
||||
*
|
||||
* @param {string|FieldPath} field The field path
|
||||
* (e.g. 'foo' or 'foo.bar') to a specific field.
|
||||
* @returns {*} The data at the specified field location or undefined if no
|
||||
* such field exists.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.set({ a: { b: 'c' }}).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(documentSnapshot => {
|
||||
* let field = documentSnapshot.get('a.b');
|
||||
* console.log(`Retrieved field value: ${field}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get(field: string | FieldPath): any;
|
||||
/**
|
||||
* Retrieves the field specified by 'fieldPath' in its Protobuf JS
|
||||
* representation.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param field The path (e.g. 'foo' or 'foo.bar') to a specific field.
|
||||
* @returns The Protobuf-encoded data at the specified field location or
|
||||
* undefined if no such field exists.
|
||||
*/
|
||||
protoField(field: string | FieldPath): api.IValue | undefined;
|
||||
/**
|
||||
* Convert a document snapshot to the Firestore 'Write' proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toWriteProto(): api.IWrite;
|
||||
/**
|
||||
* Convert a document snapshot to the Firestore 'Document' proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toDocumentProto(): api.IDocument;
|
||||
/**
|
||||
* Returns true if the document's data and path in this `DocumentSnapshot` is
|
||||
* equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `DocumentSnapshot` is equal to the provided
|
||||
* value.
|
||||
*/
|
||||
isEqual(other: firestore.DocumentSnapshot<AppModelType, DbModelType>): boolean;
|
||||
}
|
||||
/**
|
||||
* A QueryDocumentSnapshot contains data read from a document in your
|
||||
* Firestore database as part of a query. The document is guaranteed to exist
|
||||
* and its data can be extracted with [data()]{@link QueryDocumentSnapshot#data}
|
||||
* or [get()]{@link DocumentSnapshot#get} to get a specific field.
|
||||
*
|
||||
* A QueryDocumentSnapshot offers the same API surface as a
|
||||
* {@link DocumentSnapshot}. Since query results contain only existing
|
||||
* documents, the [exists]{@link DocumentSnapshot#exists} property will
|
||||
* always be true and [data()]{@link QueryDocumentSnapshot#data} will never
|
||||
* return 'undefined'.
|
||||
*
|
||||
* @class QueryDocumentSnapshot
|
||||
* @extends DocumentSnapshot
|
||||
*/
|
||||
export declare class QueryDocumentSnapshot<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> extends DocumentSnapshot<AppModelType, DbModelType> implements firestore.QueryDocumentSnapshot<AppModelType, DbModelType> {
|
||||
/**
|
||||
* The time the document was created.
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name QueryDocumentSnapshot#createTime
|
||||
* @readonly
|
||||
* @override
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(snapshot => {
|
||||
* console.log(`Document created at '${snapshot.createTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get createTime(): Timestamp;
|
||||
/**
|
||||
* The time the document was last updated (at the time the snapshot was
|
||||
* generated).
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name QueryDocumentSnapshot#updateTime
|
||||
* @readonly
|
||||
* @override
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(snapshot => {
|
||||
* console.log(`Document updated at '${snapshot.updateTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get updateTime(): Timestamp;
|
||||
/**
|
||||
* Retrieves all fields in the document as an object.
|
||||
*
|
||||
* @override
|
||||
*
|
||||
* @returns {T} An object containing all fields in the document.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(documentSnapshot => {
|
||||
* let data = documentSnapshot.data();
|
||||
* console.log(`Retrieved data: ${JSON.stringify(data)}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
data(): AppModelType;
|
||||
}
|
||||
/**
|
||||
* A Firestore Document Mask contains the field paths affected by an update.
|
||||
*
|
||||
* @class
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class DocumentMask {
|
||||
private _sortedPaths;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param fieldPaths The field paths in this mask.
|
||||
*/
|
||||
constructor(fieldPaths: FieldPath[]);
|
||||
/**
|
||||
* Creates a document mask with the field paths of a document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data A map with fields to modify. Only the keys are used to extract
|
||||
* the document mask.
|
||||
*/
|
||||
static fromUpdateMap(data: UpdateMap): DocumentMask;
|
||||
/**
|
||||
* Creates a document mask from an array of field paths.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldMask A list of field paths.
|
||||
*/
|
||||
static fromFieldMask(fieldMask: Array<string | firestore.FieldPath>): DocumentMask;
|
||||
/**
|
||||
* Creates a document mask with the field names of a document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data An object with fields to modify. Only the keys are used to
|
||||
* extract the document mask.
|
||||
*/
|
||||
static fromObject(data: firestore.DocumentData): DocumentMask;
|
||||
/**
|
||||
* Returns true if this document mask contains no fields.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @return {boolean} Whether this document mask is empty.
|
||||
*/
|
||||
get isEmpty(): boolean;
|
||||
/**
|
||||
* Removes the specified values from a sorted field path array.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param input A sorted array of FieldPaths.
|
||||
* @param values An array of FieldPaths to remove.
|
||||
*/
|
||||
private static removeFromSortedArray;
|
||||
/**
|
||||
* Removes the field path specified in 'fieldPaths' from this document mask.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldPaths An array of FieldPaths.
|
||||
*/
|
||||
removeFields(fieldPaths: FieldPath[]): void;
|
||||
/**
|
||||
* Returns whether this document mask contains 'fieldPath'.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldPath The field path to test.
|
||||
* @return Whether this document mask contains 'fieldPath'.
|
||||
*/
|
||||
contains(fieldPath: FieldPath): boolean;
|
||||
/**
|
||||
* Removes all properties from 'data' that are not contained in this document
|
||||
* mask.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data An object to filter.
|
||||
* @return A shallow copy of the object filtered by this document mask.
|
||||
*/
|
||||
applyTo(data: firestore.DocumentData): firestore.DocumentData;
|
||||
/**
|
||||
* Converts a document mask to the Firestore 'DocumentMask' Proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A Firestore 'DocumentMask' Proto.
|
||||
*/
|
||||
toProto(): api.IDocumentMask;
|
||||
}
|
||||
/**
|
||||
* A Firestore Document Transform.
|
||||
*
|
||||
* A DocumentTransform contains pending server-side transforms and their
|
||||
* corresponding field paths.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
export declare class DocumentTransform<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> {
|
||||
private readonly ref;
|
||||
private readonly transforms;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param ref The DocumentReference for this transform.
|
||||
* @param transforms A Map of FieldPaths to FieldTransforms.
|
||||
*/
|
||||
constructor(ref: DocumentReference<AppModelType, DbModelType>, transforms: Map<FieldPath, FieldTransform>);
|
||||
/**
|
||||
* Generates a DocumentTransform from a JavaScript object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The `DocumentReference` to use for the DocumentTransform.
|
||||
* @param obj The object to extract the transformations from.
|
||||
* @returns The Document Transform.
|
||||
*/
|
||||
static fromObject<AppModelType, DbModelType extends firestore.DocumentData>(ref: firestore.DocumentReference<AppModelType, DbModelType>, obj: firestore.DocumentData): DocumentTransform<AppModelType, DbModelType>;
|
||||
/**
|
||||
* Generates a DocumentTransform from an Update Map.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The `DocumentReference` to use for the DocumentTransform.
|
||||
* @param data The update data to extract the transformations from.
|
||||
* @returns The Document Transform.
|
||||
*/
|
||||
static fromUpdateMap<AppModelType, DbModelType extends firestore.DocumentData>(ref: firestore.DocumentReference<AppModelType, DbModelType>, data: UpdateMap): DocumentTransform<AppModelType, DbModelType>;
|
||||
/**
|
||||
* Whether this DocumentTransform contains any actionable transformations.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isEmpty(): boolean;
|
||||
/**
|
||||
* Returns the array of fields in this DocumentTransform.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get fields(): FieldPath[];
|
||||
/**
|
||||
* Validates the user provided field values in this document transform.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
validate(): void;
|
||||
/**
|
||||
* Converts a document transform to the Firestore 'FieldTransform' Proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param serializer The Firestore serializer
|
||||
* @returns A list of Firestore 'FieldTransform' Protos
|
||||
*/
|
||||
toProto(serializer: Serializer): api.DocumentTransform.IFieldTransform[];
|
||||
}
|
||||
/**
|
||||
* A Firestore Precondition encapsulates options for database writes.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
export declare class Precondition {
|
||||
private _exists?;
|
||||
private _lastUpdateTime?;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param options.exists - Whether the referenced document should exist in
|
||||
* Firestore.
|
||||
* @param options.lastUpdateTime - The last update time of the referenced
|
||||
* document in Firestore.
|
||||
* @param options The precondition options.
|
||||
*/
|
||||
constructor(options?: {
|
||||
exists?: boolean;
|
||||
lastUpdateTime?: firestore.Timestamp;
|
||||
});
|
||||
/**
|
||||
* Generates the Protobuf `Precondition` object for this precondition.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The `Precondition` Protobuf object or 'null' if there are no
|
||||
* preconditions.
|
||||
*/
|
||||
toProto(): api.IPrecondition | null;
|
||||
/**
|
||||
* Whether this Precondition contains any enforcement.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isEmpty(): boolean;
|
||||
}
|
||||
936
server/node_modules/@google-cloud/firestore/build/src/document.js
generated
vendored
Normal file
@@ -0,0 +1,936 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2019 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Precondition = exports.DocumentTransform = exports.DocumentMask = exports.QueryDocumentSnapshot = exports.DocumentSnapshot = exports.DocumentSnapshotBuilder = void 0;
|
||||
const deepEqual = require("fast-deep-equal");
|
||||
const assert = require("assert");
|
||||
const field_value_1 = require("./field-value");
|
||||
const path_1 = require("./path");
|
||||
const document_reference_1 = require("./reference/document-reference");
|
||||
const types_1 = require("./types");
|
||||
const util_1 = require("./util");
|
||||
/**
|
||||
* Returns a builder for DocumentSnapshot and QueryDocumentSnapshot instances.
|
||||
* Invoke `.build()` to assemble the final snapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class DocumentSnapshotBuilder {
|
||||
// We include the DocumentReference in the constructor in order to allow the
|
||||
// DocumentSnapshotBuilder to be typed with <AppModelType, DbModelType> when
|
||||
// it is constructed.
|
||||
constructor(ref) {
|
||||
this.ref = ref;
|
||||
}
|
||||
/**
|
||||
* Builds the DocumentSnapshot.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns Returns either a QueryDocumentSnapshot (if `fieldsProto` was
|
||||
* provided) or a DocumentSnapshot.
|
||||
*/
|
||||
build() {
|
||||
assert((this.fieldsProto !== undefined) === (this.createTime !== undefined), 'Create time should be set iff document exists.');
|
||||
assert((this.fieldsProto !== undefined) === (this.updateTime !== undefined), 'Update time should be set iff document exists.');
|
||||
return this.fieldsProto
|
||||
? new QueryDocumentSnapshot(this.ref, this.fieldsProto, this.readTime, this.createTime, this.updateTime)
|
||||
: new DocumentSnapshot(this.ref, undefined, this.readTime);
|
||||
}
|
||||
}
|
||||
exports.DocumentSnapshotBuilder = DocumentSnapshotBuilder;
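// Editor's sketch (not part of the generated build output): a minimal illustration of
// DocumentSnapshotBuilder.build(). The `documentRef` parameter is assumed to be any
// DocumentReference obtained from an initialized Firestore client; the helper name is
// hypothetical and the function is never invoked by the library.
function _snapshotBuilderSketch(documentRef) {
    const builder = new DocumentSnapshotBuilder(documentRef);
    // With no fieldsProto/createTime/updateTime set, build() yields a plain
    // DocumentSnapshot describing a missing document.
    const snapshot = builder.build();
    return snapshot.exists; // false
}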
|
||||
/**
|
||||
* A DocumentSnapshot is an immutable representation for a document in a
|
||||
* Firestore database. The data can be extracted with
|
||||
* [data()]{@link DocumentSnapshot#data} or
|
||||
* [get(fieldPath)]{@link DocumentSnapshot#get} to get a
|
||||
* specific field.
|
||||
*
|
||||
* <p>For a DocumentSnapshot that points to a non-existing document, any data
|
||||
* access will return 'undefined'. You can use the
|
||||
* [exists]{@link DocumentSnapshot#exists} property to explicitly verify a
|
||||
* document's existence.
|
||||
*
|
||||
* @class DocumentSnapshot
|
||||
*/
|
||||
class DocumentSnapshot {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*
|
||||
* @param ref The reference to the document.
|
||||
* @param _fieldsProto The fields of the Firestore `Document` Protobuf backing
|
||||
* this document (or undefined if the document does not exist).
|
||||
* @param readTime The time when this snapshot was read (or undefined if
|
||||
* the document exists only locally).
|
||||
* @param createTime The time when the document was created (or undefined if
|
||||
* the document does not exist).
|
||||
* @param updateTime The time when the document was last updated (or undefined
|
||||
* if the document does not exist).
|
||||
*/
|
||||
constructor(ref,
|
||||
/**
|
||||
* @internal
|
||||
* @private
|
||||
**/
|
||||
_fieldsProto, readTime, createTime, updateTime) {
|
||||
this._fieldsProto = _fieldsProto;
|
||||
this._ref = ref;
|
||||
this._serializer = ref.firestore._serializer;
|
||||
this._readTime = readTime;
|
||||
this._createTime = createTime;
|
||||
this._updateTime = updateTime;
|
||||
}
|
||||
/**
|
||||
* Creates a DocumentSnapshot from an object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The reference to the document.
|
||||
* @param obj The object to store in the DocumentSnapshot.
|
||||
* @return The created DocumentSnapshot.
|
||||
*/
|
||||
static fromObject(ref, obj) {
|
||||
const serializer = ref.firestore._serializer;
|
||||
return new DocumentSnapshot(ref, serializer.encodeFields(obj));
|
||||
}
|
||||
/**
|
||||
* Creates a DocumentSnapshot from an UpdateMap.
|
||||
*
|
||||
* This method expands the top-level field paths in a JavaScript map and
|
||||
* turns { 'foo.bar': foobar } into { foo: { bar: foobar } }
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The reference to the document.
|
||||
* @param data The field/value map to expand.
|
||||
* @return The created DocumentSnapshot.
|
||||
*/
|
||||
static fromUpdateMap(ref, data) {
|
||||
const serializer = ref
|
||||
.firestore._serializer;
|
||||
/**
|
||||
* Merges 'value' at the field path specified by the path array into
|
||||
* 'target'.
|
||||
*/
|
||||
function merge(target, value, path, pos) {
|
||||
const key = path[pos];
|
||||
const isLast = pos === path.length - 1;
|
||||
if (target[key] === undefined) {
|
||||
if (isLast) {
|
||||
if (value instanceof field_value_1.FieldTransform) {
|
||||
// If there is already data at this path, we need to retain it.
|
||||
// Otherwise, we don't include it in the DocumentSnapshot.
|
||||
return !(0, util_1.isEmpty)(target) ? target : null;
|
||||
}
|
||||
// The merge is done.
|
||||
const leafNode = serializer.encodeValue(value);
|
||||
if (leafNode) {
|
||||
target[key] = leafNode;
|
||||
}
|
||||
return target;
|
||||
}
|
||||
else {
|
||||
// We need to expand the target object.
|
||||
const childNode = {
|
||||
mapValue: {
|
||||
fields: {},
|
||||
},
|
||||
};
|
||||
const nestedValue = merge(childNode.mapValue.fields, value, path, pos + 1);
|
||||
if (nestedValue) {
|
||||
childNode.mapValue.fields = nestedValue;
|
||||
target[key] = childNode;
|
||||
return target;
|
||||
}
|
||||
else {
|
||||
return !(0, util_1.isEmpty)(target) ? target : null;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
assert(!isLast, "Can't merge current value into a nested object");
|
||||
target[key].mapValue.fields = merge(target[key].mapValue.fields, value, path, pos + 1);
|
||||
return target;
|
||||
}
|
||||
}
|
||||
const res = {};
|
||||
for (const [key, value] of data) {
|
||||
const path = key.toArray();
|
||||
merge(res, value, path, 0);
|
||||
}
|
||||
return new DocumentSnapshot(ref, res);
|
||||
}
|
||||
/**
|
||||
* True if the document exists.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @name DocumentSnapshot#exists
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Data: ${JSON.stringify(documentSnapshot.data())}`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get exists() {
|
||||
return this._fieldsProto !== undefined;
|
||||
}
|
||||
/**
|
||||
* A [DocumentReference]{@link DocumentReference} for the document
|
||||
* stored in this snapshot.
|
||||
*
|
||||
* @type {DocumentReference}
|
||||
* @name DocumentSnapshot#ref
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Found document at '${documentSnapshot.ref.path}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get ref() {
|
||||
return this._ref;
|
||||
}
|
||||
/**
|
||||
* The ID of the document for which this DocumentSnapshot contains data.
|
||||
*
|
||||
* @type {string}
|
||||
* @name DocumentSnapshot#id
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then((documentSnapshot) => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Document found with name '${documentSnapshot.id}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get id() {
|
||||
return this._ref.id;
|
||||
}
|
||||
/**
|
||||
* The time the document was created. Undefined for documents that don't
|
||||
* exist.
|
||||
*
|
||||
* @type {Timestamp|undefined}
|
||||
* @name DocumentSnapshot#createTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* let createTime = documentSnapshot.createTime;
|
||||
* console.log(`Document created at '${createTime.toDate()}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get createTime() {
|
||||
return this._createTime;
|
||||
}
|
||||
/**
|
||||
* The time the document was last updated (at the time the snapshot was
|
||||
* generated). Undefined for documents that don't exist.
|
||||
*
|
||||
* @type {Timestamp|undefined}
|
||||
* @name DocumentSnapshot#updateTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* if (documentSnapshot.exists) {
|
||||
* let updateTime = documentSnapshot.updateTime;
|
||||
* console.log(`Document updated at '${updateTime.toDate()}'`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get updateTime() {
|
||||
return this._updateTime;
|
||||
}
|
||||
/**
|
||||
* The time this snapshot was read.
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name DocumentSnapshot#readTime
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* let readTime = documentSnapshot.readTime;
|
||||
* console.log(`Document read at '${readTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get readTime() {
|
||||
if (this._readTime === undefined) {
|
||||
throw new Error("Called 'readTime' on a local document");
|
||||
}
|
||||
return this._readTime;
|
||||
}
|
||||
/**
|
||||
* Retrieves all fields in the document as an object. Returns 'undefined' if
|
||||
* the document doesn't exist.
|
||||
*
|
||||
* @returns {T|undefined} An object containing all fields in the document or
|
||||
* 'undefined' if the document doesn't exist.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.get().then(documentSnapshot => {
|
||||
* let data = documentSnapshot.data();
|
||||
* console.log(`Retrieved data: ${JSON.stringify(data)}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
data() {
|
||||
const fields = this._fieldsProto;
|
||||
if (fields === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
// We only want to use the converter and create a new QueryDocumentSnapshot
|
||||
// if a converter has been provided.
|
||||
if (this.ref._converter !== (0, types_1.defaultConverter)()) {
|
||||
const untypedReference = new document_reference_1.DocumentReference(this.ref.firestore, this.ref._path);
|
||||
return this.ref._converter.fromFirestore(new QueryDocumentSnapshot(untypedReference, this._fieldsProto, this.readTime, this.createTime, this.updateTime));
|
||||
}
|
||||
else {
|
||||
const obj = {};
|
||||
for (const prop of Object.keys(fields)) {
|
||||
obj[prop] = this._serializer.decodeValue(fields[prop]);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Retrieves the field specified by `field`.
|
||||
*
|
||||
* @param {string|FieldPath} field The field path
|
||||
* (e.g. 'foo' or 'foo.bar') to a specific field.
|
||||
* @returns {*} The data at the specified field location or undefined if no
|
||||
* such field exists.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.set({ a: { b: 'c' }}).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(documentSnapshot => {
|
||||
* let field = documentSnapshot.get('a.b');
|
||||
* console.log(`Retrieved field value: ${field}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
// We deliberately use `any` in the external API to not impose type-checking
|
||||
// on end users.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
get(field) {
|
||||
(0, path_1.validateFieldPath)('field', field);
|
||||
const protoField = this.protoField(field);
|
||||
if (protoField === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return this._serializer.decodeValue(protoField);
|
||||
}
|
||||
/**
|
||||
* Retrieves the field specified by 'fieldPath' in its Protobuf JS
|
||||
* representation.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param field The path (e.g. 'foo' or 'foo.bar') to a specific field.
|
||||
* @returns The Protobuf-encoded data at the specified field location or
|
||||
* undefined if no such field exists.
|
||||
*/
|
||||
protoField(field) {
|
||||
let fields = this._fieldsProto;
|
||||
if (fields === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
const components = path_1.FieldPath.fromArgument(field).toArray();
|
||||
while (components.length > 1) {
|
||||
fields = fields[components.shift()];
|
||||
if (!fields || !fields.mapValue) {
|
||||
return undefined;
|
||||
}
|
||||
fields = fields.mapValue.fields;
|
||||
}
|
||||
return fields[components[0]];
|
||||
}
|
||||
/**
|
||||
* Convert a document snapshot to the Firestore 'Write' proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toWriteProto() {
|
||||
return {
|
||||
update: {
|
||||
name: this._ref.formattedName,
|
||||
fields: this._fieldsProto,
|
||||
},
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Convert a document snapshot to the Firestore 'Document' proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toDocumentProto() {
|
||||
var _a, _b;
|
||||
return {
|
||||
name: this._ref.formattedName,
|
||||
createTime: (_a = this.createTime) === null || _a === void 0 ? void 0 : _a.toProto().timestampValue,
|
||||
updateTime: (_b = this.updateTime) === null || _b === void 0 ? void 0 : _b.toProto().timestampValue,
|
||||
fields: this._fieldsProto,
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Returns true if the document's data and path in this `DocumentSnapshot` are
|
||||
* equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `DocumentSnapshot` is equal to the provided
|
||||
* value.
|
||||
*/
|
||||
isEqual(other) {
|
||||
// Since the read time is different on every document read, we explicitly
|
||||
// ignore all document metadata in this comparison.
|
||||
return (this === other ||
|
||||
(other instanceof DocumentSnapshot &&
|
||||
this._ref.isEqual(other._ref) &&
|
||||
deepEqual(this._fieldsProto, other._fieldsProto)));
|
||||
}
|
||||
}
|
||||
exports.DocumentSnapshot = DocumentSnapshot;
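// Editor's sketch (not part of the generated build output): shows how
// DocumentSnapshot.fromUpdateMap() expands a dotted top-level path into nested proto
// fields. `documentRef` is assumed to be a real DocumentReference (its serializer is
// used for encoding); the helper name is hypothetical and never invoked by the library.
function _fromUpdateMapSketch(documentRef) {
    const update = new Map([[path_1.FieldPath.fromArgument('foo.bar'), 'foobar']]);
    const snapshot = DocumentSnapshot.fromUpdateMap(documentRef, update);
    // Internally the fields proto now nests the value:
    // { foo: { mapValue: { fields: { bar: <encoded 'foobar'> } } } }
    return snapshot.get('foo.bar'); // 'foobar'
}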
|
||||
/**
|
||||
* A QueryDocumentSnapshot contains data read from a document in your
|
||||
* Firestore database as part of a query. The document is guaranteed to exist
|
||||
* and its data can be extracted with [data()]{@link QueryDocumentSnapshot#data}
|
||||
* or [get()]{@link DocumentSnapshot#get} to get a specific field.
|
||||
*
|
||||
* A QueryDocumentSnapshot offers the same API surface as a
|
||||
* {@link DocumentSnapshot}. Since query results contain only existing
|
||||
* documents, the [exists]{@link DocumentSnapshot#exists} property will
|
||||
* always be true and [data()]{@link QueryDocumentSnapshot#data} will never
|
||||
* return 'undefined'.
|
||||
*
|
||||
* @class QueryDocumentSnapshot
|
||||
* @extends DocumentSnapshot
|
||||
*/
|
||||
class QueryDocumentSnapshot extends DocumentSnapshot {
|
||||
/**
|
||||
* The time the document was created.
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name QueryDocumentSnapshot#createTime
|
||||
* @readonly
|
||||
* @override
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(snapshot => {
|
||||
* console.log(`Document created at '${snapshot.createTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get createTime() {
|
||||
return super.createTime;
|
||||
}
|
||||
/**
|
||||
* The time the document was last updated (at the time the snapshot was
|
||||
* generated).
|
||||
*
|
||||
* @type {Timestamp}
|
||||
* @name QueryDocumentSnapshot#updateTime
|
||||
* @readonly
|
||||
* @override
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(snapshot => {
|
||||
* console.log(`Document updated at '${snapshot.updateTime.toDate()}'`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
get updateTime() {
|
||||
return super.updateTime;
|
||||
}
|
||||
/**
|
||||
* Retrieves all fields in the document as an object.
|
||||
*
|
||||
* @override
|
||||
*
|
||||
* @returns {T} An object containing all fields in the document.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
*
|
||||
* query.get().forEach(documentSnapshot => {
|
||||
* let data = documentSnapshot.data();
|
||||
* console.log(`Retrieved data: ${JSON.stringify(data)}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
data() {
|
||||
const data = super.data();
|
||||
if (!data) {
|
||||
throw new Error('The data in a QueryDocumentSnapshot should always exist.');
|
||||
}
|
||||
return data;
|
||||
}
|
||||
}
|
||||
exports.QueryDocumentSnapshot = QueryDocumentSnapshot;
|
||||
/**
|
||||
* A Firestore Document Mask contains the field paths affected by an update.
|
||||
*
|
||||
* @class
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class DocumentMask {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param fieldPaths The field paths in this mask.
|
||||
*/
|
||||
constructor(fieldPaths) {
|
||||
this._sortedPaths = fieldPaths;
|
||||
this._sortedPaths.sort((a, b) => a.compareTo(b));
|
||||
}
|
||||
/**
|
||||
* Creates a document mask from the field paths of an update map.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data A map with fields to modify. Only the keys are used to extract
|
||||
* the document mask.
|
||||
*/
|
||||
static fromUpdateMap(data) {
|
||||
const fieldPaths = [];
|
||||
data.forEach((value, key) => {
|
||||
if (!(value instanceof field_value_1.FieldTransform) || value.includeInDocumentMask) {
|
||||
fieldPaths.push(path_1.FieldPath.fromArgument(key));
|
||||
}
|
||||
});
|
||||
return new DocumentMask(fieldPaths);
|
||||
}
|
||||
/**
|
||||
* Creates a document mask from an array of field paths.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldMask A list of field paths.
|
||||
*/
|
||||
static fromFieldMask(fieldMask) {
|
||||
const fieldPaths = [];
|
||||
for (const fieldPath of fieldMask) {
|
||||
fieldPaths.push(path_1.FieldPath.fromArgument(fieldPath));
|
||||
}
|
||||
return new DocumentMask(fieldPaths);
|
||||
}
|
||||
/**
|
||||
* Creates a document mask with the field names of a document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data An object with fields to modify. Only the keys are used to
|
||||
* extract the document mask.
|
||||
*/
|
||||
static fromObject(data) {
|
||||
const fieldPaths = [];
|
||||
function extractFieldPaths(currentData, currentPath) {
|
||||
let isEmpty = true;
|
||||
for (const key of Object.keys(currentData)) {
|
||||
isEmpty = false;
|
||||
// We don't split on dots since fromObject is called with
|
||||
// DocumentData.
|
||||
const childSegment = new path_1.FieldPath(key);
|
||||
const childPath = currentPath
|
||||
? currentPath.append(childSegment)
|
||||
: childSegment;
|
||||
const value = currentData[key];
|
||||
if (value instanceof field_value_1.FieldTransform) {
|
||||
if (value.includeInDocumentMask) {
|
||||
fieldPaths.push(childPath);
|
||||
}
|
||||
}
|
||||
else if ((0, util_1.isPlainObject)(value)) {
|
||||
extractFieldPaths(value, childPath);
|
||||
}
|
||||
else if (value !== undefined) {
|
||||
// If the value is undefined it can never participate in the document
|
||||
// mask. With `ignoreUndefinedProperties` set to false,
|
||||
// `validateDocumentData` will reject an undefined value before even
|
||||
// computing the document mask.
|
||||
fieldPaths.push(childPath);
|
||||
}
|
||||
}
|
||||
// Add a field path for an explicitly updated empty map.
|
||||
if (currentPath && isEmpty) {
|
||||
fieldPaths.push(currentPath);
|
||||
}
|
||||
}
|
||||
extractFieldPaths(data);
|
||||
return new DocumentMask(fieldPaths);
|
||||
}
|
||||
/**
|
||||
* Returns true if this document mask contains no fields.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @return {boolean} Whether this document mask is empty.
|
||||
*/
|
||||
get isEmpty() {
|
||||
return this._sortedPaths.length === 0;
|
||||
}
|
||||
/**
|
||||
* Removes the specified values from a sorted field path array.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param input A sorted array of FieldPaths.
|
||||
* @param values An array of FieldPaths to remove.
|
||||
*/
|
||||
static removeFromSortedArray(input, values) {
|
||||
for (let i = 0; i < input.length;) {
|
||||
let removed = false;
|
||||
for (const fieldPath of values) {
|
||||
if (input[i].isEqual(fieldPath)) {
|
||||
input.splice(i, 1);
|
||||
removed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!removed) {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Removes the field path specified in 'fieldPaths' from this document mask.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldPaths An array of FieldPaths.
|
||||
*/
|
||||
removeFields(fieldPaths) {
|
||||
DocumentMask.removeFromSortedArray(this._sortedPaths, fieldPaths);
|
||||
}
|
||||
/**
|
||||
* Returns whether this document mask contains 'fieldPath'.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param fieldPath The field path to test.
|
||||
* @return Whether this document mask contains 'fieldPath'.
|
||||
*/
|
||||
contains(fieldPath) {
|
||||
for (const sortedPath of this._sortedPaths) {
|
||||
const cmp = sortedPath.compareTo(fieldPath);
|
||||
if (cmp === 0) {
|
||||
return true;
|
||||
}
|
||||
else if (cmp > 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Removes all properties from 'data' that are not contained in this document
|
||||
* mask.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param data An object to filter.
|
||||
* @return A shallow copy of the object filtered by this document mask.
|
||||
*/
|
||||
applyTo(data) {
|
||||
/*!
|
||||
* Applies this DocumentMask to 'data' and computes the list of field paths
|
||||
* that were specified in the mask but are not present in 'data'.
|
||||
*/
|
||||
const applyDocumentMask = data => {
|
||||
const remainingPaths = this._sortedPaths.slice(0);
|
||||
const processObject = (currentData, currentPath) => {
|
||||
let result = null;
|
||||
Object.keys(currentData).forEach(key => {
|
||||
const childPath = currentPath
|
||||
? currentPath.append(key)
|
||||
: new path_1.FieldPath(key);
|
||||
if (this.contains(childPath)) {
|
||||
DocumentMask.removeFromSortedArray(remainingPaths, [childPath]);
|
||||
result = result || {};
|
||||
result[key] = currentData[key];
|
||||
}
|
||||
else if ((0, util_1.isObject)(currentData[key])) {
|
||||
const childObject = processObject(currentData[key], childPath);
|
||||
if (childObject) {
|
||||
result = result || {};
|
||||
result[key] = childObject;
|
||||
}
|
||||
}
|
||||
});
|
||||
return result;
|
||||
};
|
||||
// processObject() returns 'null' if the DocumentMask is empty.
|
||||
const filteredData = processObject(data) || {};
|
||||
return {
|
||||
filteredData,
|
||||
remainingPaths,
|
||||
};
|
||||
};
|
||||
const result = applyDocumentMask(data);
|
||||
if (result.remainingPaths.length !== 0) {
|
||||
throw new Error(`Input data is missing for field "${result.remainingPaths[0]}".`);
|
||||
}
|
||||
return result.filteredData;
|
||||
}
|
||||
/**
|
||||
* Converts a document mask to the Firestore 'DocumentMask' Proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A Firestore 'DocumentMask' Proto.
|
||||
*/
|
||||
toProto() {
|
||||
if (this.isEmpty) {
|
||||
return {};
|
||||
}
|
||||
const encodedPaths = [];
|
||||
for (const fieldPath of this._sortedPaths) {
|
||||
encodedPaths.push(fieldPath.formattedName);
|
||||
}
|
||||
return {
|
||||
fieldPaths: encodedPaths,
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.DocumentMask = DocumentMask;
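// Editor's sketch (not part of the generated build output): a DocumentMask built from an
// object covers the leaf field paths of that object, and applyTo() keeps only the
// covered properties. No Firestore client is needed; the helper name is hypothetical
// and the function is never invoked by the library.
function _documentMaskSketch() {
    const mask = DocumentMask.fromObject({ a: 'foo', nested: { b: 'bar' } });
    // mask.contains(new path_1.FieldPath('a')) === true
    // mask.contains(new path_1.FieldPath('nested', 'b')) === true
    const filtered = mask.applyTo({ a: 'foo', nested: { b: 'bar' } });
    // filtered is a filtered copy limited to the masked paths:
    // { a: 'foo', nested: { b: 'bar' } }
    return filtered;
}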
|
||||
/**
|
||||
* A Firestore Document Transform.
|
||||
*
|
||||
* A DocumentTransform contains pending server-side transforms and their
|
||||
* corresponding field paths.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
class DocumentTransform {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param ref The DocumentReference for this transform.
|
||||
* @param transforms A Map of FieldPaths to FieldTransforms.
|
||||
*/
|
||||
constructor(ref, transforms) {
|
||||
this.ref = ref;
|
||||
this.transforms = transforms;
|
||||
}
|
||||
/**
|
||||
* Generates a DocumentTransform from a JavaScript object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The `DocumentReference` to use for the DocumentTransform.
|
||||
* @param obj The object to extract the transformations from.
|
||||
* @returns The Document Transform.
|
||||
*/
|
||||
static fromObject(ref, obj) {
|
||||
const updateMap = new Map();
|
||||
for (const prop of Object.keys(obj)) {
|
||||
updateMap.set(new path_1.FieldPath(prop), obj[prop]);
|
||||
}
|
||||
return DocumentTransform.fromUpdateMap(ref, updateMap);
|
||||
}
|
||||
/**
|
||||
* Generates a DocumentTransform from an Update Map.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param ref The `DocumentReference` to use for the DocumentTransform.
|
||||
* @param data The update data to extract the transformations from.
|
||||
* @returns The Document Transform.
|
||||
*/
|
||||
static fromUpdateMap(ref, data) {
|
||||
const transforms = new Map();
|
||||
function encode_(val, path, allowTransforms) {
|
||||
if (val instanceof field_value_1.FieldTransform && val.includeInDocumentTransform) {
|
||||
if (allowTransforms) {
|
||||
transforms.set(path, val);
|
||||
}
|
||||
else {
|
||||
throw new Error(`${val.methodName}() is not supported inside of array values.`);
|
||||
}
|
||||
}
|
||||
else if (Array.isArray(val)) {
|
||||
for (let i = 0; i < val.length; ++i) {
|
||||
// We need to verify that no array value contains a document transform
|
||||
encode_(val[i], path.append(String(i)), false);
|
||||
}
|
||||
}
|
||||
else if ((0, util_1.isPlainObject)(val)) {
|
||||
for (const prop of Object.keys(val)) {
|
||||
encode_(val[prop], path.append(new path_1.FieldPath(prop)), allowTransforms);
|
||||
}
|
||||
}
|
||||
}
|
||||
data.forEach((value, key) => {
|
||||
encode_(value, path_1.FieldPath.fromArgument(key), true);
|
||||
});
|
||||
return new DocumentTransform(ref, transforms);
|
||||
}
|
||||
/**
|
||||
* Whether this DocumentTransform contains any actionable transformations.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isEmpty() {
|
||||
return this.transforms.size === 0;
|
||||
}
|
||||
/**
|
||||
* Returns the array of fields in this DocumentTransform.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get fields() {
|
||||
return Array.from(this.transforms.keys());
|
||||
}
|
||||
/**
|
||||
* Validates the user provided field values in this document transform.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
validate() {
|
||||
const allowUndefined = !!this.ref.firestore._settings.ignoreUndefinedProperties;
|
||||
this.transforms.forEach(transform => transform.validate(allowUndefined));
|
||||
}
|
||||
/**
|
||||
* Converts a document transform to the Firestore 'FieldTransform' Proto.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param serializer The Firestore serializer
|
||||
* @returns A list of Firestore 'FieldTransform' Protos
|
||||
*/
|
||||
toProto(serializer) {
|
||||
return Array.from(this.transforms, ([path, transform]) => transform.toProto(serializer, path));
|
||||
}
|
||||
}
|
||||
exports.DocumentTransform = DocumentTransform;
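// Editor's sketch (not part of the generated build output): field transform sentinels
// nested in the input are collected by field path, while plain values are left to the
// document mask/update path. `documentRef` is assumed to be any DocumentReference; the
// helper name is hypothetical and never invoked by the library.
function _documentTransformSketch(documentRef) {
    const transform = DocumentTransform.fromObject(documentRef, {
        title: 'hello',
        meta: { updatedAt: field_value_1.FieldValue.serverTimestamp() },
    });
    // transform.fields holds a single FieldPath ('meta.updatedAt'); 'title' is a plain
    // value and therefore not part of the transform.
    return transform.isEmpty; // false
}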
|
||||
/**
|
||||
* A Firestore Precondition encapsulates options for database writes.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
class Precondition {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
*
|
||||
* @param options.exists - Whether the referenced document should exist in
|
||||
* Firestore.
|
||||
* @param options.lastUpdateTime - The last update time of the referenced
|
||||
* document in Firestore.
|
||||
* @param options The precondition options.
|
||||
*/
|
||||
constructor(options) {
|
||||
if (options !== undefined) {
|
||||
this._exists = options.exists;
|
||||
this._lastUpdateTime = options.lastUpdateTime;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Generates the Protobuf `Precondition` object for this precondition.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The `Precondition` Protobuf object or 'null' if there are no
|
||||
* preconditions.
|
||||
*/
|
||||
toProto() {
|
||||
if (this.isEmpty) {
|
||||
return null;
|
||||
}
|
||||
const proto = {};
|
||||
if (this._lastUpdateTime !== undefined) {
|
||||
proto.updateTime = this._lastUpdateTime.toProto().timestampValue;
|
||||
}
|
||||
else {
|
||||
proto.exists = this._exists;
|
||||
}
|
||||
return proto;
|
||||
}
|
||||
/**
|
||||
* Whether this Precondition contains any enforcement.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isEmpty() {
|
||||
return this._exists === undefined && !this._lastUpdateTime;
|
||||
}
|
||||
}
|
||||
exports.Precondition = Precondition;
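// Editor's sketch (not part of the generated build output): a Precondition serializes
// `lastUpdateTime` when present, otherwise `exists`, and an empty precondition
// serializes to null. The helper name is hypothetical and never invoked by the library.
function _preconditionSketch() {
    const mustExist = new Precondition({ exists: true });
    const unconstrained = new Precondition();
    return [
        mustExist.toProto(), // { exists: true }
        unconstrained.toProto(), // null (no enforcement)
    ];
}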
|
||||
//# sourceMappingURL=document.js.map
|
||||
289
server/node_modules/@google-cloud/firestore/build/src/field-value.d.ts
generated
vendored
Normal file
@@ -0,0 +1,289 @@
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import * as proto from '../protos/firestore_v1_proto_api';
|
||||
import { FieldPath } from './path';
|
||||
import { Serializer } from './serializer';
|
||||
import api = proto.google.firestore.v1;
|
||||
/**
|
||||
* Represent a vector type in Firestore documents.
|
||||
* Create an instance with {@link FieldValue.vector}.
|
||||
*
|
||||
* @class VectorValue
|
||||
*/
|
||||
export declare class VectorValue implements firestore.VectorValue {
|
||||
private readonly _values;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(values: number[] | undefined);
|
||||
/**
|
||||
* Returns a copy of the raw number array form of the vector.
|
||||
*/
|
||||
toArray(): number[];
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_toProto(serializer: Serializer): api.IValue;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(valueArray: api.IValue): VectorValue;
|
||||
/**
|
||||
* Returns `true` if the two VectorValue instances have the same raw number arrays, returns `false` otherwise.
|
||||
*/
|
||||
isEqual(other: VectorValue): boolean;
|
||||
}
|
||||
/**
|
||||
* Sentinel values that can be used when writing documents with set(), create()
|
||||
* or update().
|
||||
*
|
||||
* @class FieldValue
|
||||
*/
|
||||
export declare class FieldValue implements firestore.FieldValue {
|
||||
/** @private */
|
||||
constructor();
|
||||
/**
|
||||
* Creates a new `VectorValue` constructed with a copy of the given array of numbers.
|
||||
*
|
||||
* @param values - Create a `VectorValue` instance with a copy of this array of numbers.
|
||||
*
|
||||
* @returns A new `VectorValue` constructed with a copy of the given array of numbers.
|
||||
*/
|
||||
static vector(values?: number[]): VectorValue;
|
||||
/**
|
||||
* Returns a sentinel for use with update() or set() with {merge:true} to mark
|
||||
* a field for deletion.
|
||||
*
|
||||
* @returns {FieldValue} The sentinel value to use in your objects.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
* let data = { a: 'b', c: 'd' };
|
||||
*
|
||||
* documentRef.set(data).then(() => {
|
||||
* return documentRef.update({a: Firestore.FieldValue.delete()});
|
||||
* }).then(() => {
|
||||
* // Document now only contains { c: 'd' }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static delete(): FieldValue;
|
||||
/**
|
||||
* Returns a sentinel used with set(), create() or update() to include a
|
||||
* server-generated timestamp in the written data.
|
||||
*
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.set({
|
||||
* time: Firestore.FieldValue.serverTimestamp()
|
||||
* }).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* console.log(`Server time set to ${doc.get('time')}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static serverTimestamp(): FieldValue;
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to increment the field's current value by the
|
||||
* given value.
|
||||
*
|
||||
* If either current field value or the operand uses floating point
|
||||
* precision, both values will be interpreted as floating point numbers and
|
||||
* all arithmetic will follow IEEE 754 semantics. Otherwise, integer
|
||||
* precision is kept and the result is capped between -2^63 and 2^63-1.
|
||||
*
|
||||
* If the current field value is not of type 'number', or if the field does
|
||||
* not yet exist, the transformation will set the field to the given value.
|
||||
*
|
||||
* @param {number} n The value to increment by.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'counter', Firestore.FieldValue.increment(1)
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('counter') was incremented
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static increment(n: number): FieldValue;
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to union the given elements with any array value that
|
||||
* already exists on the server. Each specified element that doesn't already
|
||||
* exist in the array will be added to the end. If the field being modified is
|
||||
* not already an array it will be overwritten with an array containing
|
||||
* exactly the specified elements.
|
||||
*
|
||||
* @param {...*} elements The elements to union into the array.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'array', Firestore.FieldValue.arrayUnion('foo')
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('array') contains field 'foo'
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static arrayUnion(...elements: unknown[]): FieldValue;
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to remove the given elements from any array value
|
||||
* that already exists on the server. All instances of each element specified
|
||||
* will be removed from the array. If the field being modified is not already
|
||||
* an array it will be overwritten with an empty array.
|
||||
*
|
||||
* @param {...*} elements The elements to remove from the array.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'array', Firestore.FieldValue.arrayRemove('foo')
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('array') no longer contains field 'foo'
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static arrayRemove(...elements: unknown[]): FieldValue;
|
||||
/**
|
||||
* Returns true if this `FieldValue` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `FieldValue` is equal to the provided value.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let fieldValues = [
|
||||
* Firestore.FieldValue.increment(-1.0),
|
||||
* Firestore.FieldValue.increment(-1),
|
||||
* Firestore.FieldValue.increment(-0.0),
|
||||
* Firestore.FieldValue.increment(-0),
|
||||
* Firestore.FieldValue.increment(0),
|
||||
* Firestore.FieldValue.increment(0.0),
|
||||
* Firestore.FieldValue.increment(1),
|
||||
* Firestore.FieldValue.increment(1.0)
|
||||
* ];
|
||||
*
|
||||
* let equal = 0;
|
||||
* for (let i = 0; i < fieldValues.length; ++i) {
|
||||
* for (let j = i + 1; j < fieldValues.length; ++j) {
|
||||
* if (fieldValues[i].isEqual(fieldValues[j])) {
|
||||
* ++equal;
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* console.log(`Found ${equal} equalities.`);
|
||||
* ```
|
||||
*/
|
||||
isEqual(other: firestore.FieldValue): boolean;
|
||||
}
|
||||
/**
|
||||
* An internal interface shared by all field transforms.
|
||||
*
|
||||
* A 'FieldTransform' subclass should implement '.includeInDocumentMask',
|
||||
* '.includeInDocumentTransform' and 'toProto' (if '.includeInDocumentTransform'
|
||||
* is 'true').
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @abstract
|
||||
*/
|
||||
export declare abstract class FieldTransform extends FieldValue {
|
||||
/** Whether this FieldTransform should be included in the document mask. */
|
||||
abstract get includeInDocumentMask(): boolean;
|
||||
/**
|
||||
* Whether this FieldTransform should be included in the list of document
|
||||
* transforms.
|
||||
*/
|
||||
abstract get includeInDocumentTransform(): boolean;
|
||||
/** The method name used to obtain the field transform. */
|
||||
abstract get methodName(): string;
|
||||
/**
|
||||
* Performs input validation on the values of this field transform.
|
||||
*
|
||||
* @param allowUndefined Whether to allow nested properties that are `undefined`.
|
||||
*/
|
||||
abstract validate(allowUndefined: boolean): void;
|
||||
/***
|
||||
* The proto representation for this field transform.
|
||||
*
|
||||
* @param serializer The Firestore serializer.
|
||||
* @param fieldPath The field path to apply this transformation to.
|
||||
* @return The 'FieldTransform' proto message.
|
||||
*/
|
||||
abstract toProto(serializer: Serializer, fieldPath: FieldPath): api.DocumentTransform.IFieldTransform;
|
||||
}
|
||||
/**
|
||||
* A transform that deletes a field from a Firestore document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class DeleteTransform extends FieldTransform {
|
||||
/**
|
||||
* Sentinel value for a field delete.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static DELETE_SENTINEL: DeleteTransform;
|
||||
private constructor();
|
||||
/**
|
||||
* Deletes are included in document masks.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask(): true;
|
||||
/**
|
||||
* Deletes are omitted from document transforms.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform(): false;
|
||||
get methodName(): string;
|
||||
validate(): void;
|
||||
toProto(): never;
|
||||
}
|
||||
523
server/node_modules/@google-cloud/firestore/build/src/field-value.js
generated
vendored
Normal file
@@ -0,0 +1,523 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DeleteTransform = exports.FieldTransform = exports.FieldValue = exports.VectorValue = void 0;
|
||||
const deepEqual = require("fast-deep-equal");
|
||||
const serializer_1 = require("./serializer");
|
||||
const util_1 = require("./util");
|
||||
const validate_1 = require("./validate");
|
||||
/**
|
||||
* Represent a vector type in Firestore documents.
|
||||
* Create an instance with {@link FieldValue.vector}.
|
||||
*
|
||||
* @class VectorValue
|
||||
*/
|
||||
class VectorValue {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(values) {
|
||||
// Making a copy of the parameter.
|
||||
this._values = (values || []).map(n => n);
|
||||
}
|
||||
/**
|
||||
* Returns a copy of the raw number array form of the vector.
|
||||
*/
|
||||
toArray() {
|
||||
return this._values.map(n => n);
|
||||
}
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_toProto(serializer) {
|
||||
return serializer.encodeVector(this._values);
|
||||
}
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(valueArray) {
|
||||
var _a, _b;
|
||||
const values = (_b = (_a = valueArray.arrayValue) === null || _a === void 0 ? void 0 : _a.values) === null || _b === void 0 ? void 0 : _b.map(v => {
|
||||
return v.doubleValue;
|
||||
});
|
||||
return new VectorValue(values);
|
||||
}
|
||||
/**
|
||||
* Returns `true` if the two VectorValue instances have the same raw number arrays, returns `false` otherwise.
|
||||
*/
|
||||
isEqual(other) {
|
||||
return (0, util_1.isPrimitiveArrayEqual)(this._values, other._values);
|
||||
}
|
||||
}
|
||||
exports.VectorValue = VectorValue;
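// Editor's sketch (not part of the generated build output): VectorValue copies its input
// array, so later mutation of the source does not affect the vector, and equality is
// based on the element values. The helper name is hypothetical and never invoked.
function _vectorValueSketch() {
    const source = [1, 2, 3];
    const vector = new VectorValue(source);
    source.push(4);
    // vector.toArray() still returns a fresh copy of [1, 2, 3].
    return vector.isEqual(new VectorValue([1, 2, 3])); // true
}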
|
||||
/**
|
||||
* Sentinel values that can be used when writing documents with set(), create()
|
||||
* or update().
|
||||
*
|
||||
* @class FieldValue
|
||||
*/
|
||||
class FieldValue {
|
||||
/** @private */
|
||||
constructor() { }
|
||||
/**
|
||||
* Creates a new `VectorValue` constructed with a copy of the given array of numbers.
|
||||
*
|
||||
* @param values - Create a `VectorValue` instance with a copy of this array of numbers.
|
||||
*
|
||||
* @returns A new `VectorValue` constructed with a copy of the given array of numbers.
|
||||
*/
|
||||
static vector(values) {
|
||||
return new VectorValue(values);
|
||||
}
|
||||
/**
|
||||
* Returns a sentinel for use with update() or set() with {merge:true} to mark
|
||||
* a field for deletion.
|
||||
*
|
||||
* @returns {FieldValue} The sentinel value to use in your objects.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
* let data = { a: 'b', c: 'd' };
|
||||
*
|
||||
* documentRef.set(data).then(() => {
|
||||
* return documentRef.update({a: Firestore.FieldValue.delete()});
|
||||
* }).then(() => {
|
||||
* // Document now only contains { c: 'd' }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static delete() {
|
||||
return DeleteTransform.DELETE_SENTINEL;
|
||||
}
|
||||
/**
|
||||
* Returns a sentinel used with set(), create() or update() to include a
|
||||
* server-generated timestamp in the written data.
|
||||
*
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.set({
|
||||
* time: Firestore.FieldValue.serverTimestamp()
|
||||
* }).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* console.log(`Server time set to ${doc.get('time')}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static serverTimestamp() {
|
||||
return ServerTimestampTransform.SERVER_TIMESTAMP_SENTINEL;
|
||||
}
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to increment the field's current value by the
|
||||
* given value.
|
||||
*
|
||||
* If either current field value or the operand uses floating point
|
||||
* precision, both values will be interpreted as floating point numbers and
|
||||
* all arithmetic will follow IEEE 754 semantics. Otherwise, integer
|
||||
* precision is kept and the result is capped between -2^63 and 2^63-1.
|
||||
*
|
||||
* If the current field value is not of type 'number', or if the field does
|
||||
* not yet exist, the transformation will set the field to the given value.
|
||||
*
|
||||
* @param {number} n The value to increment by.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'counter', Firestore.FieldValue.increment(1)
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('counter') was incremented
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static increment(n) {
|
||||
// eslint-disable-next-line prefer-rest-params
|
||||
(0, validate_1.validateMinNumberOfArguments)('FieldValue.increment', arguments, 1);
|
||||
return new NumericIncrementTransform(n);
|
||||
}
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to union the given elements with any array value that
|
||||
* already exists on the server. Each specified element that doesn't already
|
||||
* exist in the array will be added to the end. If the field being modified is
|
||||
* not already an array it will be overwritten with an array containing
|
||||
* exactly the specified elements.
|
||||
*
|
||||
* @param {...*} elements The elements to union into the array.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'array', Firestore.FieldValue.arrayUnion('foo')
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('array') contains field 'foo'
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static arrayUnion(...elements) {
|
||||
(0, validate_1.validateMinNumberOfArguments)('FieldValue.arrayUnion', elements, 1);
|
||||
return new ArrayUnionTransform(elements);
|
||||
}
|
||||
/**
|
||||
* Returns a special value that can be used with set(), create() or update()
|
||||
* that tells the server to remove the given elements from any array value
|
||||
* that already exists on the server. All instances of each element specified
|
||||
* will be removed from the array. If the field being modified is not already
|
||||
* an array it will be overwritten with an empty array.
|
||||
*
|
||||
* @param {...*} elements The elements to remove from the array.
|
||||
* @return {FieldValue} The FieldValue sentinel for use in a call to set(),
|
||||
* create() or update().
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
*
|
||||
* documentRef.update(
|
||||
* 'array', Firestore.FieldValue.arrayRemove('foo')
|
||||
* ).then(() => {
|
||||
* return documentRef.get();
|
||||
* }).then(doc => {
|
||||
* // doc.get('array') no longer contains field 'foo'
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static arrayRemove(...elements) {
|
||||
(0, validate_1.validateMinNumberOfArguments)('FieldValue.arrayRemove', elements, 1);
|
||||
return new ArrayRemoveTransform(elements);
|
||||
}
|
||||
/**
|
||||
* Returns true if this `FieldValue` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `FieldValue` is equal to the provided value.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let fieldValues = [
|
||||
* Firestore.FieldValue.increment(-1.0),
|
||||
* Firestore.FieldValue.increment(-1),
|
||||
* Firestore.FieldValue.increment(-0.0),
|
||||
* Firestore.FieldValue.increment(-0),
|
||||
* Firestore.FieldValue.increment(0),
|
||||
* Firestore.FieldValue.increment(0.0),
|
||||
* Firestore.FieldValue.increment(1),
|
||||
* Firestore.FieldValue.increment(1.0)
|
||||
* ];
|
||||
*
|
||||
* let equal = 0;
|
||||
* for (let i = 0; i < fieldValues.length; ++i) {
|
||||
* for (let j = i + 1; j < fieldValues.length; ++j) {
|
||||
* if (fieldValues[i].isEqual(fieldValues[j])) {
|
||||
* ++equal;
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* console.log(`Found ${equal} equalities.`);
|
||||
* ```
|
||||
*/
|
||||
isEqual(other) {
|
||||
return this === other;
|
||||
}
|
||||
}
|
||||
exports.FieldValue = FieldValue;
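// Editor's sketch (not part of the generated build output): delete() and
// serverTimestamp() return shared sentinels, while increment(), arrayUnion() and
// arrayRemove() create a new transform per call whose equality compares the operands.
// The helper name is hypothetical and never invoked by the library.
function _fieldValueSketch() {
    const sameSentinel = FieldValue.delete().isEqual(FieldValue.delete()); // true
    const sameOperand = FieldValue.increment(1).isEqual(FieldValue.increment(1)); // true
    const differentOperand = FieldValue.increment(1).isEqual(FieldValue.increment(2)); // false
    return [sameSentinel, sameOperand, differentOperand];
}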
|
||||
/**
|
||||
* An internal interface shared by all field transforms.
|
||||
*
|
||||
* A 'FieldTransform' subclass should implement '.includeInDocumentMask',
|
||||
* '.includeInDocumentTransform' and 'toProto' (if '.includeInDocumentTransform'
|
||||
* is 'true').
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @abstract
|
||||
*/
|
||||
class FieldTransform extends FieldValue {
|
||||
}
|
||||
exports.FieldTransform = FieldTransform;
|
||||
/**
|
||||
* A transform that deletes a field from a Firestore document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class DeleteTransform extends FieldTransform {
|
||||
constructor() {
|
||||
super();
|
||||
}
|
||||
/**
|
||||
* Deletes are included in document masks.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask() {
|
||||
return true;
|
||||
}
|
||||
/**
|
||||
* Deletes are omitted from document transforms.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform() {
|
||||
return false;
|
||||
}
|
||||
get methodName() {
|
||||
return 'FieldValue.delete';
|
||||
}
|
||||
validate() { }
|
||||
toProto() {
|
||||
throw new Error('FieldValue.delete() should not be included in a FieldTransform');
|
||||
}
|
||||
}
|
||||
exports.DeleteTransform = DeleteTransform;
|
||||
/**
|
||||
* Sentinel value for a field delete.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
DeleteTransform.DELETE_SENTINEL = new DeleteTransform();
|
||||
/**
|
||||
* A transform that sets a field to the Firestore server time.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ServerTimestampTransform extends FieldTransform {
|
||||
constructor() {
|
||||
super();
|
||||
}
|
||||
/**
|
||||
* Server timestamps are omitted from document masks.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask() {
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Server timestamps are included in document transforms.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform() {
|
||||
return true;
|
||||
}
|
||||
get methodName() {
|
||||
return 'FieldValue.serverTimestamp';
|
||||
}
|
||||
validate() { }
|
||||
toProto(serializer, fieldPath) {
|
||||
return {
|
||||
fieldPath: fieldPath.formattedName,
|
||||
setToServerValue: 'REQUEST_TIME',
|
||||
};
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Sentinel value for a server timestamp.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
ServerTimestampTransform.SERVER_TIMESTAMP_SENTINEL = new ServerTimestampTransform();
|
||||
/**
|
||||
* Increments a field value on the backend.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class NumericIncrementTransform extends FieldTransform {
|
||||
constructor(operand) {
|
||||
super();
|
||||
this.operand = operand;
|
||||
}
|
||||
/**
|
||||
* Numeric transforms are omitted from document masks.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask() {
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Numeric transforms are included in document transforms.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform() {
|
||||
return true;
|
||||
}
|
||||
get methodName() {
|
||||
return 'FieldValue.increment';
|
||||
}
|
||||
validate() {
|
||||
(0, validate_1.validateNumber)('FieldValue.increment()', this.operand);
|
||||
}
|
||||
toProto(serializer, fieldPath) {
|
||||
const encodedOperand = serializer.encodeValue(this.operand);
|
||||
return { fieldPath: fieldPath.formattedName, increment: encodedOperand };
|
||||
}
|
||||
isEqual(other) {
|
||||
return (this === other ||
|
||||
(other instanceof NumericIncrementTransform &&
|
||||
this.operand === other.operand));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Transforms an array value via a union operation.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ArrayUnionTransform extends FieldTransform {
|
||||
constructor(elements) {
|
||||
super();
|
||||
this.elements = elements;
|
||||
}
|
||||
/**
|
||||
* Array transforms are omitted from document masks.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask() {
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Array transforms are included in document transforms.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform() {
|
||||
return true;
|
||||
}
|
||||
get methodName() {
|
||||
return 'FieldValue.arrayUnion';
|
||||
}
|
||||
validate(allowUndefined) {
|
||||
for (let i = 0; i < this.elements.length; ++i) {
|
||||
validateArrayElement(i, this.elements[i], allowUndefined);
|
||||
}
|
||||
}
|
||||
toProto(serializer, fieldPath) {
|
||||
const encodedElements = serializer.encodeValue(this.elements).arrayValue;
|
||||
return {
|
||||
fieldPath: fieldPath.formattedName,
|
||||
appendMissingElements: encodedElements,
|
||||
};
|
||||
}
|
||||
isEqual(other) {
|
||||
return (this === other ||
|
||||
(other instanceof ArrayUnionTransform &&
|
||||
deepEqual(this.elements, other.elements)));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Transforms an array value via a remove operation.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ArrayRemoveTransform extends FieldTransform {
|
||||
constructor(elements) {
|
||||
super();
|
||||
this.elements = elements;
|
||||
}
|
||||
/**
|
||||
* Array transforms are omitted from document masks.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentMask() {
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Array transforms are included in document transforms.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get includeInDocumentTransform() {
|
||||
return true;
|
||||
}
|
||||
get methodName() {
|
||||
return 'FieldValue.arrayRemove';
|
||||
}
|
||||
validate(allowUndefined) {
|
||||
for (let i = 0; i < this.elements.length; ++i) {
|
||||
validateArrayElement(i, this.elements[i], allowUndefined);
|
||||
}
|
||||
}
|
||||
toProto(serializer, fieldPath) {
|
||||
const encodedElements = serializer.encodeValue(this.elements).arrayValue;
|
||||
return {
|
||||
fieldPath: fieldPath.formattedName,
|
||||
removeAllFromArray: encodedElements,
|
||||
};
|
||||
}
|
||||
isEqual(other) {
|
||||
return (this === other ||
|
||||
(other instanceof ArrayRemoveTransform &&
|
||||
deepEqual(this.elements, other.elements)));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Validates that `value` can be used as an element inside of an array. Certain
|
||||
* field values (such as ServerTimestamps) are rejected. Nested arrays are also
|
||||
* rejected.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param value The value to validate.
|
||||
* @param allowUndefined Whether to allow nested properties that are `undefined`.
|
||||
*/
|
||||
function validateArrayElement(arg, value, allowUndefined) {
|
||||
if (Array.isArray(value)) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'array element')} Nested arrays are not supported.`);
|
||||
}
|
||||
(0, serializer_1.validateUserInput)(arg, value, 'array element',
|
||||
/*options=*/ { allowDeletes: 'none', allowTransforms: false, allowUndefined },
|
||||
/*path=*/ undefined,
|
||||
/*level=*/ 0,
|
||||
/*inArray=*/ true);
|
||||
}
|
||||
//# sourceMappingURL=field-value.js.map
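A minimal usage sketch of the transform classes above, assuming the public FieldValue API exported by this package; the document path 'col/doc' and the field names are placeholders, not part of the vendored file:

// Illustrative only: exercises DeleteTransform, ServerTimestampTransform,
// NumericIncrementTransform and ArrayUnionTransform via their public sentinels.
const {Firestore, FieldValue} = require('@google-cloud/firestore');

const firestore = new Firestore();

async function applyTransforms() {
  // Placeholder path; the document must already exist for update() to succeed.
  const docRef = firestore.doc('col/doc');
  await docRef.update({
    updatedAt: FieldValue.serverTimestamp(), // sent as a document transform (REQUEST_TIME)
    counter: FieldValue.increment(1),        // numeric increment transform
    tags: FieldValue.arrayUnion('new-tag'),  // appendMissingElements transform
    legacyField: FieldValue.delete(),        // included in the document mask, not the transform
  });
}

applyTransforms().catch(console.error);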
|
||||
184
server/node_modules/@google-cloud/firestore/build/src/filter.d.ts
generated
vendored
Normal file
184
server/node_modules/@google-cloud/firestore/build/src/filter.d.ts
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
/*!
|
||||
* Copyright 2023 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
/**
|
||||
* A `Filter` represents a restriction on one or more field values and can
|
||||
* be used to refine the results of a {@link Query}.
|
||||
* `Filter`s are created by invoking {@link Filter#where}, {@link Filter#or},
|
||||
* or {@link Filter#and} and can then be passed to {@link Query#where}
|
||||
* to create a new {@link Query} instance that also contains this `Filter`.
|
||||
*/
|
||||
export declare abstract class Filter {
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter}, which can be
|
||||
* applied to [Query.where()]{@link Query#where}, [Filter.or()]{@link Filter#or},
|
||||
* or [Filter.and()]{@link Filter#and}. When applied to a [Query]{@link Query}
|
||||
* it requires that documents must contain the specified field and that its value should
|
||||
* satisfy the relation constraint provided.
|
||||
*
|
||||
* @param {string|FieldPath} fieldPath The name of a property value to compare.
|
||||
* @param {string} opStr A comparison operation in the form of a string.
|
||||
* Acceptable operator strings are "<", "<=", "==", "!=", ">=", ">", "array-contains",
|
||||
* "in", "not-in", and "array-contains-any".
|
||||
* @param {*} value The value to which to compare the field for inclusion in
|
||||
* a query.
|
||||
* @returns {Filter} The created Filter.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* collectionRef.where(Filter.where('foo', '==', 'bar')).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static where(fieldPath: string | firestore.FieldPath, opStr: firestore.WhereFilterOp, value: unknown): Filter;
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter} that is a
|
||||
* disjunction of the given {@link Filter}s. A disjunction filter includes
|
||||
* a document if it satisfies any of the given {@link Filter}s.
|
||||
*
|
||||
* The returned Filter can be applied to [Query.where()]{@link Query#where},
|
||||
* [Filter.or()]{@link Filter#or}, or [Filter.and()]{@link Filter#and}. When
|
||||
* applied to a [Query]{@link Query} it requires that documents must satisfy
|
||||
* one of the provided {@link Filter}s.
|
||||
*
|
||||
* @param {...Filter} filters Optional. The {@link Filter}s
|
||||
* for OR operation. These must be created with calls to {@link Filter#where},
|
||||
* {@link Filter#or}, or {@link Filter#and}.
|
||||
* @returns {Filter} The created {@link Filter}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* // doc.foo == 'bar' || doc.baz > 0
|
||||
* let orFilter = Filter.or(Filter.where('foo', '==', 'bar'), Filter.where('baz', '>', 0));
|
||||
*
|
||||
* collectionRef.where(orFilter).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static or(...filters: Filter[]): Filter;
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter} that is a
|
||||
* conjunction of the given {@link Filter}s. A conjunction filter includes
|
||||
* a document if it satisfies all of the given {@link Filter}s.
|
||||
*
|
||||
* The returned Filter can be applied to [Query.where()]{@link Query#where},
|
||||
* [Filter.or()]{@link Filter#or}, or [Filter.and()]{@link Filter#and}. When
|
||||
* applied to a [Query]{@link Query} it requires that documents must satisfy
|
||||
* one of the provided {@link Filter}s.
|
||||
*
|
||||
* @param {...Filter} filters Optional. The {@link Filter}s
|
||||
* for AND operation. These must be created with calls to {@link Filter#where},
|
||||
* {@link Filter#or}, or {@link Filter#and}.
|
||||
* @returns {Filter} The created {@link Filter}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* // doc.foo == 'bar' && doc.baz > 0
|
||||
* let andFilter = Filter.and(Filter.where('foo', '==', 'bar'), Filter.where('baz', '>', 0));
|
||||
*
|
||||
* collectionRef.where(andFilter).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static and(...filters: Filter[]): Filter;
|
||||
}
|
||||
/**
|
||||
* A `UnaryFilter` represents a restriction on one field value and can
|
||||
* be used to refine the results of a {@link Query}.
|
||||
* `UnaryFilter`s are created by invoking {@link Filter#where} and can then
|
||||
* be passed to {@link Query#where} to create a new {@link Query} instance
|
||||
* that also contains this `UnaryFilter`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class UnaryFilter extends Filter {
|
||||
private field;
|
||||
private operator;
|
||||
private value;
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
constructor(field: string | firestore.FieldPath, operator: firestore.WhereFilterOp, value: unknown);
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getField(): string | firestore.FieldPath;
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getOperator(): firestore.WhereFilterOp;
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getValue(): unknown;
|
||||
}
|
||||
/**
|
||||
* A `CompositeFilter` is used to narrow the set of documents returned
|
||||
* by a Firestore query by performing the logical OR or AND of multiple
|
||||
* {@link Filter}s. `CompositeFilter`s are created by invoking {@link Filter#or}
|
||||
* or {@link Filter#and} and can then be passed to {@link Query#where}
|
||||
* to create a new query instance that also contains the `CompositeFilter`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class CompositeFilter extends Filter {
|
||||
private filters;
|
||||
private operator;
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
constructor(filters: Filter[], operator: CompositeOperator);
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getFilters(): Filter[];
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getOperator(): CompositeOperator;
|
||||
}
|
||||
/**
|
||||
* Composition operator of a `CompositeFilter`. This operator specifies the
|
||||
* behavior of the `CompositeFilter`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export type CompositeOperator = 'AND' | 'OR';
|
||||
202
server/node_modules/@google-cloud/firestore/build/src/filter.js
generated
vendored
Normal file
202
server/node_modules/@google-cloud/firestore/build/src/filter.js
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2023 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CompositeFilter = exports.UnaryFilter = exports.Filter = void 0;
|
||||
/**
|
||||
* A `Filter` represents a restriction on one or more field values and can
|
||||
* be used to refine the results of a {@link Query}.
|
||||
* `Filter`s are created by invoking {@link Filter#where}, {@link Filter#or},
|
||||
* or {@link Filter#and} and can then be passed to {@link Query#where}
|
||||
* to create a new {@link Query} instance that also contains this `Filter`.
|
||||
*/
|
||||
class Filter {
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter}, which can be
|
||||
* applied to [Query.where()]{@link Query#where}, [Filter.or()]{@link Filter#or},
|
||||
* or [Filter.and()]{@link Filter#and}. When applied to a [Query]{@link Query}
|
||||
* it requires that documents must contain the specified field and that its value should
|
||||
* satisfy the relation constraint provided.
|
||||
*
|
||||
* @param {string|FieldPath} fieldPath The name of a property value to compare.
|
||||
* @param {string} opStr A comparison operation in the form of a string.
|
||||
* Acceptable operator strings are "<", "<=", "==", "!=", ">=", ">", "array-contains",
|
||||
* "in", "not-in", and "array-contains-any".
|
||||
* @param {*} value The value to which to compare the field for inclusion in
|
||||
* a query.
|
||||
* @returns {Filter} The created Filter.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* collectionRef.where(Filter.where('foo', '==', 'bar')).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static where(fieldPath, opStr, value) {
|
||||
return new UnaryFilter(fieldPath, opStr, value);
|
||||
}
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter} that is a
|
||||
* disjunction of the given {@link Filter}s. A disjunction filter includes
|
||||
* a document if it satisfies any of the given {@link Filter}s.
|
||||
*
|
||||
* The returned Filter can be applied to [Query.where()]{@link Query#where},
|
||||
* [Filter.or()]{@link Filter#or}, or [Filter.and()]{@link Filter#and}. When
|
||||
* applied to a [Query]{@link Query} it requires that documents must satisfy
|
||||
* one of the provided {@link Filter}s.
|
||||
*
|
||||
* @param {...Filter} filters Optional. The {@link Filter}s
|
||||
* for OR operation. These must be created with calls to {@link Filter#where},
|
||||
* {@link Filter#or}, or {@link Filter#and}.
|
||||
* @returns {Filter} The created {@link Filter}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* // doc.foo == 'bar' || doc.baz > 0
|
||||
* let orFilter = Filter.or(Filter.where('foo', '==', 'bar'), Filter.where('baz', '>', 0));
|
||||
*
|
||||
* collectionRef.where(orFilter).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static or(...filters) {
|
||||
return new CompositeFilter(filters, 'OR');
|
||||
}
|
||||
/**
|
||||
* Creates and returns a new [Filter]{@link Filter} that is a
|
||||
* conjunction of the given {@link Filter}s. A conjunction filter includes
|
||||
* a document if it satisfies all of the given {@link Filter}s.
|
||||
*
|
||||
* The returned Filter can be applied to [Query.where()]{@link Query#where},
|
||||
* [Filter.or()]{@link Filter#or}, or [Filter.and()]{@link Filter#and}. When
|
||||
* applied to a [Query]{@link Query} it requires that documents must satisfy
|
||||
* one of the provided {@link Filter}s.
|
||||
*
|
||||
* @param {...Filter} filters Optional. The {@link Filter}s
|
||||
* for AND operation. These must be created with calls to {@link Filter#where},
|
||||
* {@link Filter#or}, or {@link Filter#and}.
|
||||
* @returns {Filter} The created {@link Filter}.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* // doc.foo == 'bar' && doc.baz > 0
|
||||
* let andFilter = Filter.and(Filter.where('foo', '==', 'bar'), Filter.where('baz', '>', 0));
|
||||
*
|
||||
* collectionRef.where(andFilter).get().then(querySnapshot => {
|
||||
* querySnapshot.forEach(documentSnapshot => {
|
||||
* console.log(`Found document at ${documentSnapshot.ref.path}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
static and(...filters) {
|
||||
return new CompositeFilter(filters, 'AND');
|
||||
}
|
||||
}
|
||||
exports.Filter = Filter;
|
||||
/**
|
||||
* A `UnaryFilter` represents a restriction on one field value and can
|
||||
* be used to refine the results of a {@link Query}.
|
||||
* `UnaryFilter`s are created by invoking {@link Filter#where} and can then
|
||||
* be passed to {@link Query#where} to create a new {@link Query} instance
|
||||
* that also contains this `UnaryFilter`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class UnaryFilter extends Filter {
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
constructor(field, operator, value) {
|
||||
super();
|
||||
this.field = field;
|
||||
this.operator = operator;
|
||||
this.value = value;
|
||||
}
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getField() {
|
||||
return this.field;
|
||||
}
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getOperator() {
|
||||
return this.operator;
|
||||
}
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getValue() {
|
||||
return this.value;
|
||||
}
|
||||
}
|
||||
exports.UnaryFilter = UnaryFilter;
|
||||
/**
|
||||
* A `CompositeFilter` is used to narrow the set of documents returned
|
||||
* by a Firestore query by performing the logical OR or AND of multiple
|
||||
* {@link Filter}s. `CompositeFilter`s are created by invoking {@link Filter#or}
|
||||
* or {@link Filter#and} and can then be passed to {@link Query#where}
|
||||
* to create a new query instance that also contains the `CompositeFilter`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class CompositeFilter extends Filter {
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
constructor(filters, operator) {
|
||||
super();
|
||||
this.filters = filters;
|
||||
this.operator = operator;
|
||||
}
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getFilters() {
|
||||
return this.filters;
|
||||
}
|
||||
/**
|
||||
@private
|
||||
@internal
|
||||
*/
|
||||
_getOperator() {
|
||||
return this.operator;
|
||||
}
|
||||
}
|
||||
exports.CompositeFilter = CompositeFilter;
|
||||
//# sourceMappingURL=filter.js.map
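A minimal sketch of how UnaryFilter and CompositeFilter compose through the public Filter API; the 'cities' collection and the field names are placeholders, not part of the vendored file:

const {Firestore, Filter} = require('@google-cloud/firestore');

const firestore = new Firestore();

// population > 1000000 AND (country == 'USA' OR country == 'Japan')
const filter = Filter.and(
  Filter.where('population', '>', 1000000),   // UnaryFilter
  Filter.or(                                   // CompositeFilter with operator 'OR'
    Filter.where('country', '==', 'USA'),
    Filter.where('country', '==', 'Japan')
  )
);

firestore.collection('cities').where(filter).get()
  .then(snapshot => console.log(`Matched ${snapshot.size} documents`))
  .catch(console.error);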
|
||||
83
server/node_modules/@google-cloud/firestore/build/src/geo-point.d.ts
generated
vendored
Normal file
83
server/node_modules/@google-cloud/firestore/build/src/geo-point.d.ts
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { Serializable } from './serializer';
|
||||
import api = google.firestore.v1;
|
||||
/**
|
||||
* An immutable object representing a geographic location in Firestore. The
|
||||
* location is represented as a latitude/longitude pair.
|
||||
*
|
||||
* @class
|
||||
*/
|
||||
export declare class GeoPoint implements Serializable, firestore.GeoPoint {
|
||||
private readonly _latitude;
|
||||
private readonly _longitude;
|
||||
/**
|
||||
* Creates a [GeoPoint]{@link GeoPoint}.
|
||||
*
|
||||
* @param {number} latitude The latitude as a number between -90 and 90.
|
||||
* @param {number} longitude The longitude as a number between -180 and 180.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let data = {
|
||||
* google: new Firestore.GeoPoint(37.422, 122.084)
|
||||
* };
|
||||
*
|
||||
* firestore.doc('col/doc').set(data).then(() => {
|
||||
* console.log(`Location is ${data.google.latitude}, ` +
|
||||
* `${data.google.longitude}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
constructor(latitude: number, longitude: number);
|
||||
/**
|
||||
* The latitude as a number between -90 and 90.
|
||||
*
|
||||
* @type {number}
|
||||
* @name GeoPoint#latitude
|
||||
* @readonly
|
||||
*/
|
||||
get latitude(): number;
|
||||
/**
|
||||
* The longitude as a number between -180 and 180.
|
||||
*
|
||||
* @type {number}
|
||||
* @name GeoPoint#longitude
|
||||
* @readonly
|
||||
*/
|
||||
get longitude(): number;
|
||||
/**
|
||||
* Returns true if this `GeoPoint` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `GeoPoint` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other: firestore.GeoPoint): boolean;
|
||||
/**
|
||||
* Converts the GeoPoint to a google.type.LatLng proto.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toProto(): api.IValue;
|
||||
/**
|
||||
* Converts a google.type.LatLng proto to its GeoPoint representation.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static fromProto(proto: google.type.ILatLng): GeoPoint;
|
||||
}
|
||||
106
server/node_modules/@google-cloud/firestore/build/src/geo-point.js
generated
vendored
Normal file
106
server/node_modules/@google-cloud/firestore/build/src/geo-point.js
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.GeoPoint = void 0;
|
||||
const validate_1 = require("./validate");
|
||||
/**
|
||||
* An immutable object representing a geographic location in Firestore. The
|
||||
* location is represented as a latitude/longitude pair.
|
||||
*
|
||||
* @class
|
||||
*/
|
||||
class GeoPoint {
|
||||
/**
|
||||
* Creates a [GeoPoint]{@link GeoPoint}.
|
||||
*
|
||||
* @param {number} latitude The latitude as a number between -90 and 90.
|
||||
* @param {number} longitude The longitude as a number between -180 and 180.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let data = {
|
||||
* google: new Firestore.GeoPoint(37.422, 122.084)
|
||||
* };
|
||||
*
|
||||
* firestore.doc('col/doc').set(data).then(() => {
|
||||
* console.log(`Location is ${data.google.latitude}, ` +
|
||||
* `${data.google.longitude}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
constructor(latitude, longitude) {
|
||||
(0, validate_1.validateNumber)('latitude', latitude, { minValue: -90, maxValue: 90 });
|
||||
(0, validate_1.validateNumber)('longitude', longitude, { minValue: -180, maxValue: 180 });
|
||||
this._latitude = latitude;
|
||||
this._longitude = longitude;
|
||||
}
|
||||
/**
|
||||
* The latitude as a number between -90 and 90.
|
||||
*
|
||||
* @type {number}
|
||||
* @name GeoPoint#latitude
|
||||
* @readonly
|
||||
*/
|
||||
get latitude() {
|
||||
return this._latitude;
|
||||
}
|
||||
/**
|
||||
* The longitude as a number between -180 and 180.
|
||||
*
|
||||
* @type {number}
|
||||
* @name GeoPoint#longitude
|
||||
* @readonly
|
||||
*/
|
||||
get longitude() {
|
||||
return this._longitude;
|
||||
}
|
||||
/**
|
||||
* Returns true if this `GeoPoint` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `GeoPoint` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other) {
|
||||
return (this === other ||
|
||||
(other instanceof GeoPoint &&
|
||||
this.latitude === other.latitude &&
|
||||
this.longitude === other.longitude));
|
||||
}
|
||||
/**
|
||||
* Converts the GeoPoint to a google.type.LatLng proto.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toProto() {
|
||||
return {
|
||||
geoPointValue: {
|
||||
latitude: this.latitude,
|
||||
longitude: this.longitude,
|
||||
},
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Converts a google.type.LatLng proto to its GeoPoint representation.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static fromProto(proto) {
|
||||
return new GeoPoint(proto.latitude || 0, proto.longitude || 0);
|
||||
}
|
||||
}
|
||||
exports.GeoPoint = GeoPoint;
|
||||
//# sourceMappingURL=geo-point.js.map
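A minimal sketch of the GeoPoint API above; the 'offices/mtv' path and the coordinates are placeholders, not part of the vendored file:

const {Firestore, GeoPoint} = require('@google-cloud/firestore');

const firestore = new Firestore();

// Latitude must be within [-90, 90] and longitude within [-180, 180];
// out-of-range values make the constructor throw.
const googleplex = new GeoPoint(37.422, -122.084);

firestore.doc('offices/mtv').set({location: googleplex})
  .then(() => console.log(
    `Stored ${googleplex.latitude}, ${googleplex.longitude}`))
  .catch(console.error);

// isEqual compares the latitude/longitude pair element-wise.
console.log(googleplex.isEqual(new GeoPoint(37.422, -122.084))); // true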
|
||||
990
server/node_modules/@google-cloud/firestore/build/src/index.d.ts
generated
vendored
Normal file
990
server/node_modules/@google-cloud/firestore/build/src/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,990 @@
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { Duplex } from 'stream';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { BulkWriter } from './bulk-writer';
|
||||
import { BundleBuilder } from './bundle';
|
||||
import { DocumentSnapshot, QueryDocumentSnapshot } from './document';
|
||||
import { CollectionReference } from './reference/collection-reference';
|
||||
import { DocumentReference } from './reference/document-reference';
|
||||
import { Serializer } from './serializer';
|
||||
import { Transaction } from './transaction';
|
||||
import { FirestoreStreamingMethod, FirestoreUnaryMethod } from './types';
|
||||
import { WriteBatch } from './write-batch';
|
||||
import api = google.firestore.v1;
|
||||
import { CollectionGroup } from './collection-group';
|
||||
import { TraceUtil } from './telemetry/trace-util';
|
||||
export { CollectionReference } from './reference/collection-reference';
|
||||
export { DocumentReference } from './reference/document-reference';
|
||||
export { QuerySnapshot } from './reference/query-snapshot';
|
||||
export { Query } from './reference/query';
|
||||
export type { AggregateQuery } from './reference/aggregate-query';
|
||||
export type { AggregateQuerySnapshot } from './reference/aggregate-query-snapshot';
|
||||
export type { VectorQuery } from './reference/vector-query';
|
||||
export type { VectorQuerySnapshot } from './reference/vector-query-snapshot';
|
||||
export type { VectorQueryOptions } from './reference/vector-query-options';
|
||||
export { BulkWriter } from './bulk-writer';
|
||||
export type { BulkWriterError } from './bulk-writer';
|
||||
export type { BundleBuilder } from './bundle';
|
||||
export { DocumentSnapshot, QueryDocumentSnapshot } from './document';
|
||||
export { FieldValue, VectorValue } from './field-value';
|
||||
export { Filter } from './filter';
|
||||
export { WriteBatch, WriteResult } from './write-batch';
|
||||
export { Transaction } from './transaction';
|
||||
export { Timestamp } from './timestamp';
|
||||
export { DocumentChange } from './document-change';
|
||||
export type { DocumentChangeType } from './document-change';
|
||||
export { FieldPath } from './path';
|
||||
export { GeoPoint } from './geo-point';
|
||||
export { CollectionGroup };
|
||||
export { QueryPartition } from './query-partition';
|
||||
export { setLogFunction } from './logger';
|
||||
export { Aggregate, AggregateField } from './aggregate';
|
||||
export type { AggregateFieldType, AggregateSpec, AggregateType, } from './aggregate';
|
||||
export type { PlanSummary, ExecutionStats, ExplainMetrics, ExplainResults, } from './query-profile';
|
||||
/**
|
||||
* The maximum number of times to retry idempotent requests.
|
||||
* @private
|
||||
*/
|
||||
export declare const MAX_REQUEST_RETRIES = 5;
|
||||
/**
|
||||
* The maximum number of times to attempt a transaction before failing.
|
||||
* @private
|
||||
*/
|
||||
export declare const DEFAULT_MAX_TRANSACTION_ATTEMPTS = 5;
|
||||
/*!
|
||||
* The default number of idle GRPC channels to keep.
|
||||
*/
|
||||
export declare const DEFAULT_MAX_IDLE_CHANNELS = 1;
|
||||
/**
|
||||
* Document data (e.g. for use with
|
||||
* [set()]{@link DocumentReference#set}) consisting of fields mapped
|
||||
* to values.
|
||||
*
|
||||
* @typedef {Object.<string, *>} DocumentData
|
||||
*/
|
||||
/**
|
||||
* Converter used by [withConverter()]{@link Query#withConverter} to transform
|
||||
* user objects of type `AppModelType` into Firestore data of type
|
||||
* `DbModelType`.
|
||||
*
|
||||
* Using the converter allows you to specify generic type arguments when storing
|
||||
* and retrieving objects from Firestore.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* class Post {
|
||||
* constructor(readonly title: string, readonly author: string) {}
|
||||
*
|
||||
* toString(): string {
|
||||
* return this.title + ', by ' + this.author;
|
||||
* }
|
||||
* }
|
||||
*
|
||||
* const postConverter = {
|
||||
* toFirestore(post: Post): FirebaseFirestore.DocumentData {
|
||||
* return {title: post.title, author: post.author};
|
||||
* },
|
||||
* fromFirestore(
|
||||
* snapshot: FirebaseFirestore.QueryDocumentSnapshot
|
||||
* ): Post {
|
||||
* const data = snapshot.data();
|
||||
* return new Post(data.title, data.author);
|
||||
* }
|
||||
* };
|
||||
*
|
||||
* const postSnap = await Firestore()
|
||||
* .collection('posts')
|
||||
* .withConverter(postConverter)
|
||||
* .doc().get();
|
||||
* const post = postSnap.data();
|
||||
* if (post !== undefined) {
|
||||
* post.title; // string
|
||||
* post.toString(); // Should be defined
|
||||
* post.someNonExistentProperty; // TS error
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @property {Function} toFirestore Called by the Firestore SDK to convert a
|
||||
* custom model object of type `AppModelType` into a plain Javascript object
|
||||
* (suitable for writing directly to the Firestore database).
|
||||
* @property {Function} fromFirestore Called by the Firestore SDK to convert
|
||||
* Firestore data into an object of type `AppModelType`.
|
||||
* @typedef {Object} FirestoreDataConverter
|
||||
*/
|
||||
/**
|
||||
* Update data (for use with [update]{@link DocumentReference#update})
|
||||
* that contains paths mapped to values. Fields that contain dots
|
||||
* reference nested fields within the document.
|
||||
*
|
||||
* You can update a top-level field in your document by using the field name
|
||||
* as a key (e.g. `foo`). The provided value completely replaces the contents
|
||||
* for this field.
|
||||
*
|
||||
* You can also update a nested field directly by using its field path as a key
|
||||
* (e.g. `foo.bar`). This nested field update replaces the contents at `bar`
|
||||
* but does not modify other data under `foo`.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const documentRef = firestore.doc('coll/doc');
|
||||
* documentRef.set({a1: {a2: 'val'}, b1: {b2: 'val'}, c1: {c2: 'val'}});
|
||||
* documentRef.update({
|
||||
* b1: {b3: 'val'},
|
||||
* 'c1.c3': 'val',
|
||||
* });
|
||||
* // Value is {a1: {a2: 'val'}, b1: {b3: 'val'}, c1: {c2: 'val', c3: 'val'}}
|
||||
*
|
||||
* ```
|
||||
* @typedef {Object.<string, *>} UpdateData
|
||||
*/
|
||||
/**
|
||||
* An options object that configures conditional behavior of
|
||||
* [update()]{@link DocumentReference#update} and
|
||||
* [delete()]{@link DocumentReference#delete} calls in
|
||||
* [DocumentReference]{@link DocumentReference},
|
||||
* [WriteBatch]{@link WriteBatch}, [BulkWriter]{@link BulkWriter}, and
|
||||
* [Transaction]{@link Transaction}. Using Preconditions, these calls
|
||||
* can be restricted to only apply to documents that match the specified
|
||||
* conditions.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const documentRef = firestore.doc('coll/doc');
|
||||
*
|
||||
* documentRef.get().then(snapshot => {
|
||||
* const updateTime = snapshot.updateTime;
|
||||
*
|
||||
* console.log(`Deleting document at update time: ${updateTime.toDate()}`);
|
||||
* return documentRef.delete({ lastUpdateTime: updateTime });
|
||||
* });
|
||||
*
|
||||
* ```
|
||||
* @property {Timestamp} lastUpdateTime The update time to enforce. If set,
|
||||
* enforces that the document was last updated at lastUpdateTime. Fails the
|
||||
* operation if the document was last updated at a different time.
|
||||
* @property {boolean} exists If set, enforces that the target document must
|
||||
* or must not exist.
|
||||
* @typedef {Object} Precondition
|
||||
*/
|
||||
/**
|
||||
* An options object that configures the behavior of
|
||||
* [set()]{@link DocumentReference#set} calls in
|
||||
* [DocumentReference]{@link DocumentReference},
|
||||
* [WriteBatch]{@link WriteBatch}, and
|
||||
* [Transaction]{@link Transaction}. These calls can be
|
||||
* configured to perform granular merges instead of overwriting the target
|
||||
* documents in their entirety by providing a SetOptions object with
|
||||
* { merge : true }.
|
||||
*
|
||||
* @property {boolean} merge Changes the behavior of a set() call to only
|
||||
* replace the values specified in its data argument. Fields omitted from the
|
||||
* set() call remain untouched.
|
||||
* @property {Array<(string|FieldPath)>} mergeFields Changes the behavior of
|
||||
* set() calls to only replace the specified field paths. Any field path that is
|
||||
* not specified is ignored and remains untouched.
|
||||
* It is an error to pass a SetOptions object to a set() call that is missing a
|
||||
* value for any of the fields specified here.
|
||||
* @typedef {Object} SetOptions
|
||||
*/
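// A minimal illustrative sketch (not from this declaration file) of SetOptions
// in practice; the document path 'col/doc' and the field names are placeholders:
//
//   // Merge only the provided fields, leaving the rest of the document intact.
//   await firestore.doc('col/doc').set({lastSeen: new Date()}, {merge: true});
//
//   // Restrict the merge to explicit field paths; fields in the data that are
//   // not listed in mergeFields are ignored.
//   await firestore.doc('col/doc').set(
//     {profile: {name: 'Ada'}, scratch: 'ignored'},
//     {mergeFields: ['profile.name']}
//   );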
|
||||
/**
|
||||
* An options object that can be used to configure the behavior of
|
||||
* [getAll()]{@link Firestore#getAll} calls. By providing a `fieldMask`, these
|
||||
* calls can be configured to only return a subset of fields.
|
||||
*
|
||||
* @property {Array<(string|FieldPath)>} fieldMask Specifies the set of fields
|
||||
* to return and reduces the amount of data transmitted by the backend.
|
||||
* Adding a field mask does not filter results. Documents do not need to
|
||||
* contain values for all the fields in the mask to be part of the result set.
|
||||
* @typedef {Object} ReadOptions
|
||||
*/
|
||||
/**
|
||||
* An options object to configure throttling on BulkWriter.
|
||||
*
|
||||
* Whether to disable or configure throttling. By default, throttling is
|
||||
* enabled. `throttling` can be set to either a boolean or a config object.
|
||||
* Setting it to `true` will use default values. You can override the defaults
|
||||
* by setting it to `false` to disable throttling, or by setting the config
|
||||
* values to enable throttling with the provided values.
|
||||
*
|
||||
* @property {boolean|Object} throttling Whether to disable or enable
|
||||
* throttling. Throttling is enabled by default, if the field is set to `true`
|
||||
* or if any custom throttling options are provided. `{ initialOpsPerSecond:
|
||||
* number }` sets the initial maximum number of operations per second allowed by
|
||||
* the throttler. If `initialOpsPerSecond` is not set, the default is 500
|
||||
* operations per second. `{ maxOpsPerSecond: number }` sets the maximum number
|
||||
* of operations per second allowed by the throttler. If `maxOpsPerSecond` is
|
||||
* not set, no maximum is enforced.
|
||||
* @typedef {Object} BulkWriterOptions
|
||||
*/
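// A minimal illustrative sketch (not from this declaration file) of
// BulkWriterOptions; the rate limits shown are placeholder values:
//
//   // Custom throttling: start at 100 ops/sec and never exceed 1000 ops/sec.
//   const writer = firestore.bulkWriter({
//     throttling: {initialOpsPerSecond: 100, maxOpsPerSecond: 1000},
//   });
//
//   // Disable throttling entirely.
//   const unthrottled = firestore.bulkWriter({throttling: false});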
|
||||
/**
|
||||
* An error thrown when a BulkWriter operation fails.
|
||||
*
|
||||
* The error used by {@link BulkWriter~shouldRetryCallback} set in
|
||||
* {@link BulkWriter#onWriteError}.
|
||||
*
|
||||
* @property {GrpcStatus} code The status code of the error.
|
||||
* @property {string} message The error message of the error.
|
||||
* @property {DocumentReference} documentRef The document reference the
|
||||
* operation was performed on.
|
||||
* @property {'create' | 'set' | 'update' | 'delete'} operationType The type
|
||||
* of operation performed.
|
||||
* @property {number} failedAttempts How many times this operation has been
|
||||
* attempted unsuccessfully.
|
||||
* @typedef {Error} BulkWriterError
|
||||
*/
|
||||
/**
|
||||
* Status codes returned by GRPC operations.
|
||||
*
|
||||
* @see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
|
||||
*
|
||||
* @enum {number}
|
||||
* @typedef {Object} GrpcStatus
|
||||
*/
|
||||
/**
|
||||
* The Firestore client represents a Firestore Database and is the entry point
|
||||
* for all Firestore operations.
|
||||
*
|
||||
* @see [Firestore Documentation]{@link https://firebase.google.com/docs/firestore/}
|
||||
*
|
||||
* @class
|
||||
*
|
||||
* @example Install the client library with <a href="https://www.npmjs.com/">npm</a>:
|
||||
* ```
|
||||
* npm install --save @google-cloud/firestore
|
||||
*
|
||||
* ```
|
||||
* @example Import the client library
|
||||
* ```
|
||||
* var Firestore = require('@google-cloud/firestore');
|
||||
*
|
||||
* ```
|
||||
* @example Create a client that uses <a href="https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application">Application Default Credentials (ADC)</a>:
|
||||
* ```
|
||||
* var firestore = new Firestore();
|
||||
*
|
||||
* ```
|
||||
* @example Create a client with <a href="https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually">explicit credentials</a>:
|
||||
* ```
|
||||
* var firestore = new Firestore({ projectId:
|
||||
* 'your-project-id', keyFilename: '/path/to/keyfile.json'
|
||||
* });
|
||||
*
|
||||
* ```
|
||||
* @example <caption>include:samples/quickstart.js</caption>
|
||||
* region_tag:firestore_quickstart
|
||||
* Full quickstart example:
|
||||
*/
|
||||
export declare class Firestore implements firestore.Firestore {
|
||||
/**
|
||||
* A client pool to distribute requests over multiple GAPIC clients in order
|
||||
* to work around a connection limit of 100 concurrent requests per client.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _clientPool;
|
||||
/**
|
||||
* Preloaded instance of google-gax (full module, with gRPC support).
|
||||
*/
|
||||
private _gax?;
|
||||
/**
|
||||
* Preloaded instance of google-gax HTTP fallback implementation (no gRPC).
|
||||
*/
|
||||
private _gaxFallback?;
|
||||
/**
|
||||
* The configuration options for the GAPIC client.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_settings: firestore.Settings;
|
||||
/**
|
||||
* Settings for the exponential backoff used by the streaming endpoints.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _backoffSettings;
|
||||
/**
|
||||
* Whether the initialization settings can still be changed by invoking
|
||||
* `settings()`.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _settingsFrozen;
|
||||
/**
|
||||
* The serializer to use for the Protobuf transformation.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_serializer: Serializer | null;
|
||||
/**
|
||||
* The OpenTelemetry tracing utility object.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_traceUtil: TraceUtil;
|
||||
/**
|
||||
* The project ID for this client.
|
||||
*
|
||||
* The project ID is auto-detected during the first request unless a project
|
||||
* ID is passed to the constructor (or provided via `.settings()`).
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _projectId;
|
||||
/**
|
||||
* The database ID provided via `.settings()`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _databaseId;
|
||||
/**
|
||||
* Count of listeners that have been registered on the client.
|
||||
*
|
||||
* The client can only be terminated when there are no pending writes or
|
||||
* registered listeners.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private registeredListenersCount;
|
||||
/**
|
||||
* A lazy-loaded BulkWriter instance to be used with recursiveDelete() if no
|
||||
* BulkWriter instance is provided.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private _bulkWriter;
|
||||
/**
|
||||
* Lazy-load the Firestore's default BulkWriter.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private getBulkWriter;
|
||||
/**
|
||||
* Number of pending operations on the client.
|
||||
*
|
||||
* The client can only be terminated when there are no pending writes or
|
||||
* registered listeners.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private bulkWritersCount;
|
||||
/**
|
||||
* @param {Object=} settings [Configuration object](#/docs).
|
||||
* @param {string=} settings.projectId The project ID from the Google
|
||||
* Developer's Console, e.g. 'grape-spaceship-123'. We will also check the
|
||||
* environment variable GCLOUD_PROJECT for your project ID. Can be omitted in
|
||||
* environments that support
|
||||
* {@link https://cloud.google.com/docs/authentication Application Default
|
||||
* Credentials}
|
||||
* @param {string=} settings.keyFilename Local file containing the Service
|
||||
* Account credentials as downloaded from the Google Developers Console. Can
|
||||
* be omitted in environments that support
|
||||
* {@link https://cloud.google.com/docs/authentication Application Default
|
||||
* Credentials}. To configure Firestore with custom credentials, use
|
||||
* `settings.credentials` and provide the `client_email` and `private_key` of
|
||||
* your service account.
|
||||
* @param {{client_email:string=, private_key:string=}=} settings.credentials
|
||||
* The `client_email` and `private_key` properties of the service account
|
||||
* to use with your Firestore project. Can be omitted in environments that
|
||||
* support {@link https://cloud.google.com/docs/authentication Application
|
||||
* Default Credentials}. If your credentials are stored in a JSON file, you
|
||||
* can specify a `keyFilename` instead.
|
||||
* @param {string=} settings.host The host to connect to.
|
||||
* @param {boolean=} settings.ssl Whether to use SSL when connecting.
|
||||
* @param {number=} settings.maxIdleChannels The maximum number of idle GRPC
|
||||
* channels to keep. A smaller number of idle channels reduces memory usage
|
||||
* but increases request latency for clients with fluctuating request rates.
|
||||
* If set to 0, shuts down all GRPC channels when the client becomes idle.
|
||||
* Defaults to 1.
|
||||
* @param {boolean=} settings.ignoreUndefinedProperties Whether to skip nested
|
||||
* properties that are set to `undefined` during object serialization. If set
|
||||
* to `true`, these properties are skipped and not written to Firestore. If
|
||||
* set to `false` or omitted, the SDK throws an exception when it encounters
|
||||
* properties of type `undefined`.
|
||||
* @param {boolean=} settings.preferRest Whether to force the use of HTTP/1.1 REST
|
||||
* transport until a method that requires gRPC is called. When a method requires gRPC,
|
||||
* this Firestore client will load dependent gRPC libraries and then use gRPC transport
|
||||
* for communication from that point forward. Currently the only operation
|
||||
* that requires gRPC is creating a snapshot listener with the method
|
||||
* `DocumentReference<T>.onSnapshot()`, `CollectionReference<T>.onSnapshot()`, or
|
||||
* `Query<T>.onSnapshot()`. If specified, this setting value will take precedence over the
|
||||
* environment variable `FIRESTORE_PREFER_REST`. If not specified, the
|
||||
* SDK will use the value specified in the environment variable `FIRESTORE_PREFER_REST`.
|
||||
* Valid values of `FIRESTORE_PREFER_REST` are `true` (`1`) or `false` (`0`). Values are
|
||||
* not case-sensitive. Any other value for the environment variable will be ignored and
|
||||
* a warning will be logged to the console.
|
||||
*/
|
||||
constructor(settings?: firestore.Settings);
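// A minimal illustrative sketch (not from this declaration file) of constructing
// a client with some of the settings documented above; the project ID is a
// placeholder:
//
//   const firestore = new Firestore({
//     projectId: 'my-project-id',
//     preferRest: true,                 // use REST until a method requires gRPC
//     ignoreUndefinedProperties: true,  // skip `undefined` values instead of throwing
//     maxIdleChannels: 1,               // number of idle gRPC channels to keep
//   });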
|
||||
/**
|
||||
* Specifies custom settings to be used to configure the `Firestore`
|
||||
* instance. Can only be invoked once and before any other Firestore method.
|
||||
*
|
||||
* If settings are provided via both `settings()` and the `Firestore`
|
||||
* constructor, both settings objects are merged and any settings provided via
|
||||
* `settings()` take precedence.
|
||||
*
|
||||
* @param {object} settings The settings to use for all Firestore operations.
|
||||
*/
|
||||
settings(settings: firestore.Settings): void;
|
||||
private validateAndApplySettings;
|
||||
private newTraceUtilInstance;
|
||||
/**
|
||||
* Returns the Project ID for this Firestore instance. Validates that
|
||||
* `initializeIfNeeded()` was called before.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get projectId(): string;
|
||||
/**
|
||||
* Returns the Database ID for this Firestore instance.
|
||||
*/
|
||||
get databaseId(): string;
|
||||
/**
|
||||
* Returns the root path of the database. Validates that
|
||||
* `initializeIfNeeded()` was called before.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get formattedName(): string;
|
||||
/**
|
||||
* Gets a [DocumentReference]{@link DocumentReference} instance that
|
||||
* refers to the document at the specified path.
|
||||
*
|
||||
* @param {string} documentPath A slash-separated path to a document.
|
||||
* @returns {DocumentReference} The
|
||||
* [DocumentReference]{@link DocumentReference} instance.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let documentRef = firestore.doc('collection/document');
|
||||
* console.log(`Path of document is ${documentRef.path}`);
|
||||
* ```
|
||||
*/
|
||||
doc(documentPath: string): DocumentReference;
|
||||
/**
|
||||
* Gets a [CollectionReference]{@link CollectionReference} instance
|
||||
* that refers to the collection at the specified path.
|
||||
*
|
||||
* @param {string} collectionPath A slash-separated path to a collection.
|
||||
* @returns {CollectionReference} The
|
||||
* [CollectionReference]{@link CollectionReference} instance.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('collection');
|
||||
*
|
||||
* // Add a document with an auto-generated ID.
|
||||
* collectionRef.add({foo: 'bar'}).then((documentRef) => {
|
||||
* console.log(`Added document at ${documentRef.path})`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
collection(collectionPath: string): CollectionReference;
|
||||
/**
|
||||
* Creates and returns a new Query that includes all documents in the
|
||||
* database that are contained in a collection or subcollection with the
|
||||
* given collectionId.
|
||||
*
|
||||
* @param {string} collectionId Identifies the collections to query over.
|
||||
* Every collection or subcollection with this ID as the last segment of its
|
||||
* path will be included. Cannot contain a slash.
|
||||
* @returns {CollectionGroup} The created CollectionGroup.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let docA = firestore.doc('mygroup/docA').set({foo: 'bar'});
|
||||
* let docB = firestore.doc('abc/def/mygroup/docB').set({foo: 'bar'});
|
||||
*
|
||||
* Promise.all([docA, docB]).then(() => {
|
||||
* let query = firestore.collectionGroup('mygroup');
|
||||
* query = query.where('foo', '==', 'bar');
|
||||
* return query.get().then(snapshot => {
|
||||
* console.log(`Found ${snapshot.size} documents.`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
collectionGroup(collectionId: string): CollectionGroup;
|
||||
/**
|
||||
* Creates a [WriteBatch]{@link WriteBatch}, used for performing
|
||||
* multiple writes as a single atomic operation.
|
||||
*
|
||||
* @returns {WriteBatch} A WriteBatch that operates on this Firestore
|
||||
* client.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let writeBatch = firestore.batch();
|
||||
*
|
||||
* // Add two documents in an atomic batch.
|
||||
* let data = { foo: 'bar' };
|
||||
* writeBatch.set(firestore.doc('col/doc1'), data);
|
||||
* writeBatch.set(firestore.doc('col/doc2'), data);
|
||||
*
|
||||
* writeBatch.commit().then(res => {
|
||||
* console.log('Successfully executed batch.');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
batch(): WriteBatch;
|
||||
/**
|
||||
* Creates a [BulkWriter]{@link BulkWriter}, used for performing
|
||||
* multiple writes in parallel. Gradually ramps up writes as specified
|
||||
* by the 500/50/5 rule.
|
||||
*
|
||||
* If you pass [BulkWriterOptions]{@link BulkWriterOptions}, you can
|
||||
* configure the throttling rates for the created BulkWriter.
|
||||
*
|
||||
* @see [500/50/5 Documentation]{@link https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic}
|
||||
*
|
||||
* @param {BulkWriterOptions=} options BulkWriter options.
|
||||
* @returns {BulkWriter} A BulkWriter that operates on this Firestore
|
||||
* client.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let bulkWriter = firestore.bulkWriter();
|
||||
*
|
||||
* bulkWriter.create(firestore.doc('col/doc1'), {foo: 'bar'})
|
||||
* .then(res => {
|
||||
* console.log(`Added document at ${res.writeTime}`);
|
||||
* });
|
||||
* bulkWriter.update(firestore.doc('col/doc2'), {foo: 'bar'})
|
||||
* .then(res => {
|
||||
* console.log(`Updated document at ${res.writeTime}`);
|
||||
* });
|
||||
* bulkWriter.delete(firestore.doc('col/doc3'))
|
||||
* .then(res => {
|
||||
* console.log(`Deleted document at ${res.writeTime}`);
|
||||
* });
|
||||
* await bulkWriter.close().then(() => {
|
||||
* console.log('Executed all writes');
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
bulkWriter(options?: firestore.BulkWriterOptions): BulkWriter;
|
||||
/**
|
||||
* Creates a [DocumentSnapshot]{@link DocumentSnapshot} or a
|
||||
* [QueryDocumentSnapshot]{@link QueryDocumentSnapshot} from a
|
||||
* `firestore.v1.Document` proto (or from a resource name for missing
|
||||
* documents).
|
||||
*
|
||||
* This API is used by Google Cloud Functions and can be called with both
|
||||
* 'Proto3 JSON' and 'Protobuf JS' encoded data.
|
||||
*
|
||||
* @private
|
||||
* @param documentOrName The Firestore 'Document' proto or the resource name
|
||||
* of a missing document.
|
||||
* @param readTime A 'Timestamp' proto indicating the time this document was
|
||||
* read.
|
||||
* @param encoding One of 'json' or 'protobufJS'. Applies to both the
|
||||
* 'document' Proto and 'readTime'. Defaults to 'protobufJS'.
|
||||
* @returns A QueryDocumentSnapshot for existing documents, otherwise a
|
||||
* DocumentSnapshot.
|
||||
*/
|
||||
snapshot_(documentName: string, readTime?: google.protobuf.ITimestamp, encoding?: 'protobufJS'): DocumentSnapshot;
|
||||
/** @private */
|
||||
snapshot_(documentName: string, readTime: string, encoding: 'json'): DocumentSnapshot;
|
||||
/** @private */
|
||||
snapshot_(document: api.IDocument, readTime: google.protobuf.ITimestamp, encoding?: 'protobufJS'): QueryDocumentSnapshot;
|
||||
/** @private */
|
||||
snapshot_(document: {
|
||||
[k: string]: unknown;
|
||||
}, readTime: string, encoding: 'json'): QueryDocumentSnapshot;
|
||||
/**
|
||||
* Creates a new `BundleBuilder` instance to package selected Firestore data into
|
||||
* a bundle.
|
||||
*
|
||||
* @param bundleId The id of the bundle. When loaded on clients, client SDKs use this id
|
||||
* and the timestamp associated with the built bundle to tell if it has been loaded already.
|
||||
* If not specified, a random identifier will be used.
|
||||
*/
|
||||
bundle(name?: string): BundleBuilder;
|
||||
/**
|
||||
* Function executed by {@link Firestore#runTransaction} within the transaction
|
||||
* context.
|
||||
*
|
||||
* @callback Firestore~updateFunction
|
||||
* @template T
|
||||
* @param {Transaction} transaction The transaction object for this
|
||||
* transaction.
|
||||
* @returns {Promise<T>} The promise returned at the end of the transaction.
|
||||
* This promise will be returned by {@link Firestore#runTransaction} if the
|
||||
* transaction completed successfully.
|
||||
*/
|
||||
/**
|
||||
* Options object for {@link Firestore#runTransaction} to configure a
|
||||
* read-only transaction.
|
||||
*
|
||||
* @param {true} readOnly Set to true to indicate a read-only transaction.
|
||||
* @param {Timestamp=} readTime If specified, documents are read at the given
|
||||
* time. This may not be more than 60 seconds in the past from when the
|
||||
* request is processed by the server.
|
||||
* @typedef {Object} Firestore~ReadOnlyTransactionOptions
|
||||
*/
|
||||
/**
|
||||
* Options object for {@link Firestore#runTransaction} to configure a
|
||||
* read-write transaction.
|
||||
*
|
||||
* @param {false=} readOnly Set to false or omit to indicate a read-write
|
||||
* transaction.
|
||||
* @param {number=} maxAttempts The maximum number of attempts for this
|
||||
* transaction. Defaults to 5.
|
||||
* @typedef {Object} Firestore~ReadWriteTransactionOptions
|
||||
*/
|
||||
/**
|
||||
* Executes the given updateFunction and commits the changes applied within
|
||||
* the transaction.
|
||||
*
|
||||
* You can use the transaction object passed to 'updateFunction' to read and
|
||||
* modify Firestore documents under lock. You have to perform all reads
|
||||
* before you perform any write.
|
||||
*
|
||||
* Transactions can be performed as read-only or read-write transactions. By
|
||||
* default, transactions are executed in read-write mode.
|
||||
*
|
||||
* A read-write transaction obtains a pessimistic lock on all documents that
|
||||
* are read during the transaction. These locks block other transactions,
|
||||
* batched writes, and other non-transactional writes from changing that
|
||||
* document. Any writes in a read-write transaction are committed once
|
||||
* 'updateFunction' resolves, which also releases all locks.
|
||||
*
|
||||
* If a read-write transaction fails with contention, the transaction is
|
||||
* retried up to five times. The `updateFunction` is invoked once for each
|
||||
* attempt.
|
||||
*
|
||||
* Read-only transactions do not lock documents. They can be used to read
|
||||
* documents at a consistent snapshot in time, which may be up to 60 seconds
|
||||
* in the past. Read-only transactions are not retried.
|
||||
*
|
||||
* Transactions time out after 60 seconds if no documents are read.
|
||||
* Transactions that are not committed within 270 seconds are also
|
||||
* aborted. Any remaining locks are released when a transaction times out.
|
||||
*
|
||||
* @template T
|
||||
* @param {Firestore~updateFunction} updateFunction The user function to
|
||||
* execute within the transaction context.
|
||||
* @param {
|
||||
* Firestore~ReadWriteTransactionOptions|Firestore~ReadOnlyTransactionOptions=
|
||||
* } transactionOptions Transaction options.
|
||||
* @returns {Promise<T>} If the transaction completed successfully or was
|
||||
* explicitly aborted (by the updateFunction returning a failed Promise), the
|
||||
* Promise returned by the updateFunction will be returned here. Else if the
|
||||
* transaction failed, a rejected Promise with the corresponding failure
|
||||
* error will be returned.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let counterTransaction = firestore.runTransaction(transaction => {
|
||||
* let documentRef = firestore.doc('col/doc');
|
||||
* return transaction.get(documentRef).then(doc => {
|
||||
* if (doc.exists) {
|
||||
* let count = doc.get('count') || 0;
|
||||
* if (count > 10) {
|
||||
* return Promise.reject('Reached maximum count');
|
||||
* }
|
||||
* transaction.update(documentRef, { count: ++count });
|
||||
* return Promise.resolve(count);
|
||||
* }
|
||||
*
|
||||
* transaction.create(documentRef, { count: 1 });
|
||||
* return Promise.resolve(1);
|
||||
* });
|
||||
* });
|
||||
*
|
||||
* counterTransaction.then(res => {
|
||||
* console.log(`Count updated to ${res}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
runTransaction<T>(updateFunction: (transaction: Transaction) => Promise<T>, transactionOptions?: firestore.ReadWriteTransactionOptions | firestore.ReadOnlyTransactionOptions): Promise<T>;
|
||||
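// A minimal sketch of a read-only transaction using the options documented
// above (assumes a `Firestore` instance named `firestore`):
//
//   const count = await firestore.runTransaction(
//     async t => {
//       const snap = await t.get(firestore.doc('col/doc'));
//       return snap.get('count') || 0;
//     },
//     { readOnly: true }
//   );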
/**
|
||||
* Fetches the root collections that are associated with this Firestore
|
||||
* database.
|
||||
*
|
||||
* @returns {Promise.<Array.<CollectionReference>>} A Promise that resolves
|
||||
* with an array of CollectionReferences.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* firestore.listCollections().then(collections => {
|
||||
* for (let collection of collections) {
|
||||
* console.log(`Found collection with id: ${collection.id}`);
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
listCollections(): Promise<CollectionReference[]>;
|
||||
/**
|
||||
* Retrieves multiple documents from Firestore.
|
||||
*
|
||||
* The first argument is required and must be of type `DocumentReference`
|
||||
* followed by any additional `DocumentReference` documents. If used, the
|
||||
* optional `ReadOptions` must be the last argument.
|
||||
*
|
||||
* @param {...DocumentReference|ReadOptions} documentRefsOrReadOptions The
|
||||
* `DocumentReferences` to receive, followed by an optional field mask.
|
||||
* @returns {Promise<Array.<DocumentSnapshot>>} A Promise that
|
||||
* contains an array with the resulting document snapshots.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let docRef1 = firestore.doc('col/doc1');
|
||||
* let docRef2 = firestore.doc('col/doc2');
|
||||
*
|
||||
* firestore.getAll(docRef1, docRef2, { fieldMask: ['user'] }).then(docs => {
|
||||
* console.log(`First document: ${JSON.stringify(docs[0])}`);
|
||||
* console.log(`Second document: ${JSON.stringify(docs[1])}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
getAll<AppModelType, DbModelType extends firestore.DocumentData>(...documentRefsOrReadOptions: Array<firestore.DocumentReference<AppModelType, DbModelType> | firestore.ReadOptions>): Promise<Array<DocumentSnapshot<AppModelType, DbModelType>>>;
|
||||
/**
|
||||
* Registers a listener on this client, incrementing the listener count. This
|
||||
* is used to verify that all listeners are unsubscribed when terminate() is
|
||||
* called.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
registerListener(): void;
|
||||
/**
|
||||
* Unregisters a listener on this client, decrementing the listener count.
|
||||
* This is used to verify that all listeners are unsubscribed when terminate()
|
||||
* is called.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
unregisterListener(): void;
|
||||
/**
|
||||
* Increments the number of open BulkWriter instances. This is used to verify
|
||||
* that all pending operations are complete when terminate() is called.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_incrementBulkWritersCount(): void;
|
||||
/**
|
||||
* Decrements the number of open BulkWriter instances. This is used to verify
|
||||
* that all pending operations are complete when terminate() is called.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_decrementBulkWritersCount(): void;
|
||||
/**
|
||||
* Recursively deletes all documents and subcollections at and under the
|
||||
* specified level.
|
||||
*
|
||||
* If any delete fails, the promise is rejected with an error message
|
||||
* containing the number of failed deletes and the stack trace of the last
|
||||
* failed delete. The provided reference is deleted regardless of whether
|
||||
* all deletes succeeded.
|
||||
*
|
||||
* `recursiveDelete()` uses a BulkWriter instance with default settings to
|
||||
* perform the deletes. To customize throttling rates or add success/error
|
||||
* callbacks, pass in a custom BulkWriter instance.
|
||||
*
|
||||
* @param ref The reference of a document or collection to delete.
|
||||
* @param bulkWriter A custom BulkWriter instance used to perform the
|
||||
* deletes.
|
||||
* @return A promise that resolves when all deletes have been performed.
|
||||
* The promise is rejected if any of the deletes fail.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* // Recursively delete a reference and log the references of failures.
|
||||
* const bulkWriter = firestore.bulkWriter();
|
||||
* bulkWriter
|
||||
* .onWriteError((error) => {
|
||||
* if (
|
||||
* error.failedAttempts < MAX_RETRY_ATTEMPTS
|
||||
* ) {
|
||||
* return true;
|
||||
* } else {
|
||||
* console.log('Failed write at document: ', error.documentRef.path);
|
||||
* return false;
|
||||
* }
|
||||
* });
|
||||
* await firestore.recursiveDelete(docRef, bulkWriter);
|
||||
* ```
|
||||
*/
|
||||
recursiveDelete(ref: firestore.CollectionReference<any, any> | firestore.DocumentReference<any, any>, bulkWriter?: BulkWriter): Promise<void>;
|
||||
/**
|
||||
* This overload is not private in order to test the query resumption with
|
||||
* startAfter() once the RecursiveDelete instance has MAX_PENDING_OPS pending.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
_recursiveDelete(ref: firestore.CollectionReference<unknown> | firestore.DocumentReference<unknown>, maxPendingOps: number, minPendingOps: number, bulkWriter?: BulkWriter): Promise<void>;
|
||||
/**
|
||||
* Terminates the Firestore client and closes all open streams.
|
||||
*
|
||||
* @return A Promise that resolves when the client is terminated.
|
||||
*/
|
||||
terminate(): Promise<void>;
|
||||
/**
|
||||
* Returns the Project ID to serve as the JSON representation of this
|
||||
* Firestore instance.
|
||||
*
|
||||
* @return An object that contains the project ID (or `undefined` if not yet
|
||||
* available).
|
||||
*/
|
||||
toJSON(): object;
|
||||
/**
|
||||
* Initializes the client if it is not already initialized. All methods in the
|
||||
* SDK can be used after this method completes.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param requestTag A unique client-assigned identifier that caused this
|
||||
* initialization.
|
||||
* @return A Promise that resolves when the client is initialized.
|
||||
*/
|
||||
initializeIfNeeded(requestTag: string): Promise<void>;
|
||||
/**
|
||||
* Returns GAX call options that set the cloud resource header.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private createCallOptions;
|
||||
/**
|
||||
* A function returning a Promise that can be retried.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @callback retryFunction
|
||||
* @returns {Promise} A Promise indicating the function's success.
|
||||
*/
|
||||
/**
|
||||
* Helper method that retries failed Promises.
|
||||
*
|
||||
* If 'delayMs' is specified, waits 'delayMs' between invocations. Otherwise,
|
||||
* schedules the first attempt immediately, and then waits 100 milliseconds
|
||||
* for further attempts.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param methodName Name of the Veneer API endpoint that takes a request
|
||||
* and GAX options.
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
* @param func Method returning a Promise that can be retried.
|
||||
* @returns A Promise with the function's result if successful within
|
||||
* `attemptsRemaining`. Otherwise, returns the last rejected Promise.
|
||||
*/
|
||||
private _retry;
|
||||
/**
|
||||
* Waits for the provided stream to become active and returns a paused but
|
||||
* healthy stream. If an error occurs before the first byte is read, the
|
||||
* method rejects the returned Promise.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param backendStream The Node stream to monitor.
|
||||
* @param lifetime A Promise that resolves when the stream receives an 'end',
|
||||
* 'close' or 'finish' message.
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
* @param request If specified, the request that should be written to the
|
||||
* stream after opening.
|
||||
* @returns A guaranteed healthy stream that should be used instead of
|
||||
* `backendStream`.
|
||||
*/
|
||||
private _initializeStream;
|
||||
/**
|
||||
* A funnel for all non-streaming API requests, assigning a project ID where
|
||||
* necessary within the request options.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param methodName Name of the Veneer API endpoint that takes a request
|
||||
* and GAX options.
|
||||
* @param request The Protobuf request to send.
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
* @param retryCodes If provided, a custom list of retry codes. If not
|
||||
* provided, retry is based on the behavior as defined in the ServiceConfig.
|
||||
* @returns A Promise with the request result.
|
||||
*/
|
||||
request<Req, Resp>(methodName: FirestoreUnaryMethod, request: Req, requestTag: string, retryCodes?: number[]): Promise<Resp>;
|
||||
/**
|
||||
* A funnel for streaming API requests, assigning a project ID where necessary
|
||||
* within the request options.
|
||||
*
|
||||
* The stream is returned in paused state and needs to be resumed once all
|
||||
* listeners are attached.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param methodName Name of the streaming Veneer API endpoint that
|
||||
* takes a request and GAX options.
|
||||
* @param bidirectional Whether the request is bidirectional (true) or
* unidirectional (false).
|
||||
* @param request The Protobuf request to send.
|
||||
* @param requestTag A unique client-assigned identifier for this request.
|
||||
* @returns A Promise with the resulting read-only stream.
|
||||
*/
|
||||
requestStream(methodName: FirestoreStreamingMethod, bidirectional: boolean, request: {}, requestTag: string): Promise<Duplex>;
|
||||
}
|
||||
/**
|
||||
* A logging function that takes a single string.
|
||||
*
|
||||
* @callback Firestore~logFunction
|
||||
* @param {string} message The log message.
|
||||
*/
|
||||
/**
|
||||
* The default export of the `@google-cloud/firestore` package is the
|
||||
* {@link Firestore} class.
|
||||
*
|
||||
* See {@link Firestore} and {@link ClientConfig} for client methods and
|
||||
* configuration options.
|
||||
*
|
||||
* @module {Firestore} @google-cloud/firestore
|
||||
* @alias nodejs-firestore
|
||||
*
|
||||
* @example Install the client library with <a href="https://www.npmjs.com/">npm</a>:
|
||||
* ```
|
||||
* npm install --save @google-cloud/firestore
|
||||
*
|
||||
* ```
|
||||
* @example Import the client library
|
||||
* ```
|
||||
* var Firestore = require('@google-cloud/firestore');
|
||||
*
|
||||
* ```
|
||||
* @example Create a client that uses <a href="https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application">Application Default Credentials (ADC)</a>:
|
||||
* ```
|
||||
* var firestore = new Firestore();
|
||||
*
|
||||
* ```
|
||||
* @example Create a client with <a href="https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually">explicit credentials</a>:
|
||||
* ```
|
||||
* var firestore = new Firestore({ projectId:
|
||||
* 'your-project-id', keyFilename: '/path/to/keyfile.json'
|
||||
* });
|
||||
*
|
||||
* ```
|
||||
* @example <caption>include:samples/quickstart.js</caption>
|
||||
* region_tag:firestore_quickstart
|
||||
* Full quickstart example:
|
||||
*/
|
||||
export default Firestore;
|
||||
1555
server/node_modules/@google-cloud/firestore/build/src/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
37
server/node_modules/@google-cloud/firestore/build/src/logger.d.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/**
|
||||
* Log function to use for debug output. By default, we don't perform any
|
||||
* logging.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function logger(methodName: string, requestTag: string | null, logMessage: string, ...additionalArgs: unknown[]): void;
|
||||
/**
|
||||
* Sets or disables the log function for all active Firestore instances.
|
||||
*
|
||||
* @param logger A log function that takes a message (such as `console.log`) or
|
||||
* `null` to turn off logging.
|
||||
*/
|
||||
export declare function setLogFunction(logger: ((msg: string) => void) | null): void;
|
||||
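// A minimal sketch of turning debug logging on and off again:
//
//   import { setLogFunction } from '@google-cloud/firestore';
//   setLogFunction(msg => console.log(msg)); // forward log messages to stdout
//   setLogFunction(null);                    // disable logging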
/**
|
||||
* Sets the library version to be used in log messages.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function setLibVersion(version: string): void;
|
||||
63
server/node_modules/@google-cloud/firestore/build/src/logger.js
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.logger = logger;
|
||||
exports.setLogFunction = setLogFunction;
|
||||
exports.setLibVersion = setLibVersion;
|
||||
const util = require("util");
|
||||
const validate_1 = require("./validate");
|
||||
/*! The Firestore library version */
|
||||
let libVersion;
|
||||
/*! The external function used to emit logs. */
|
||||
let logFunction = null;
|
||||
/**
|
||||
* Log function to use for debug output. By default, we don't perform any
|
||||
* logging.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function logger(methodName, requestTag, logMessage, ...additionalArgs) {
|
||||
requestTag = requestTag || '#####';
|
||||
if (logFunction) {
|
||||
const formattedMessage = util.format(logMessage, ...additionalArgs);
|
||||
const time = new Date().toISOString();
|
||||
logFunction(`Firestore (${libVersion}) ${time} ${requestTag} [${methodName}]: ` +
|
||||
formattedMessage);
|
||||
}
|
||||
}
|
||||
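// With a log function installed via setLogFunction(), each message takes the form
//   Firestore (<libVersion>) <ISO timestamp> <requestTag> [<methodName>]: <message>
// where <requestTag> falls back to '#####' when no tag was supplied.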
/**
|
||||
* Sets or disables the log function for all active Firestore instances.
|
||||
*
|
||||
* @param logger A log function that takes a message (such as `console.log`) or
|
||||
* `null` to turn off logging.
|
||||
*/
|
||||
function setLogFunction(logger) {
|
||||
if (logger !== null)
|
||||
(0, validate_1.validateFunction)('logger', logger);
|
||||
logFunction = logger;
|
||||
}
|
||||
/**
|
||||
* Sets the library version to be used in log messages.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function setLibVersion(version) {
|
||||
libVersion = version;
|
||||
}
|
||||
//# sourceMappingURL=logger.js.map
|
||||
18
server/node_modules/@google-cloud/firestore/build/src/map-type.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
/*!
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
export declare const RESERVED_MAP_KEY = "__type__";
|
||||
export declare const RESERVED_MAP_KEY_VECTOR_VALUE = "__vector__";
|
||||
export declare const VECTOR_MAP_VECTORS_KEY = "value";
|
||||
22
server/node_modules/@google-cloud/firestore/build/src/map-type.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.VECTOR_MAP_VECTORS_KEY = exports.RESERVED_MAP_KEY_VECTOR_VALUE = exports.RESERVED_MAP_KEY = void 0;
|
||||
exports.RESERVED_MAP_KEY = '__type__';
|
||||
exports.RESERVED_MAP_KEY_VECTOR_VALUE = '__vector__';
|
||||
exports.VECTOR_MAP_VECTORS_KEY = 'value';
|
||||
//# sourceMappingURL=map-type.js.map
|
||||
38
server/node_modules/@google-cloud/firestore/build/src/order.d.ts
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import api = google.firestore.v1;
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function primitiveComparator(left: string | boolean | number, right: string | boolean | number): number;
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function compareArrays(left: api.IValue[], right: api.IValue[]): number;
|
||||
/*!
|
||||
* Compare strings in UTF-8 encoded byte order
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function compareUtf8Strings(left: string, right: string): number;
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare function compare(left: api.IValue, right: api.IValue): number;
|
||||
326
server/node_modules/@google-cloud/firestore/build/src/order.js
generated
vendored
Normal file
@@ -0,0 +1,326 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.primitiveComparator = primitiveComparator;
|
||||
exports.compareArrays = compareArrays;
|
||||
exports.compareUtf8Strings = compareUtf8Strings;
|
||||
exports.compare = compare;
|
||||
const convert_1 = require("./convert");
|
||||
const path_1 = require("./path");
|
||||
/*!
|
||||
* The type order as defined by the backend.
|
||||
*/
|
||||
var TypeOrder;
|
||||
(function (TypeOrder) {
|
||||
TypeOrder[TypeOrder["NULL"] = 0] = "NULL";
|
||||
TypeOrder[TypeOrder["BOOLEAN"] = 1] = "BOOLEAN";
|
||||
TypeOrder[TypeOrder["NUMBER"] = 2] = "NUMBER";
|
||||
TypeOrder[TypeOrder["TIMESTAMP"] = 3] = "TIMESTAMP";
|
||||
TypeOrder[TypeOrder["STRING"] = 4] = "STRING";
|
||||
TypeOrder[TypeOrder["BLOB"] = 5] = "BLOB";
|
||||
TypeOrder[TypeOrder["REF"] = 6] = "REF";
|
||||
TypeOrder[TypeOrder["GEO_POINT"] = 7] = "GEO_POINT";
|
||||
TypeOrder[TypeOrder["ARRAY"] = 8] = "ARRAY";
|
||||
TypeOrder[TypeOrder["VECTOR"] = 9] = "VECTOR";
|
||||
TypeOrder[TypeOrder["OBJECT"] = 10] = "OBJECT";
|
||||
})(TypeOrder || (TypeOrder = {}));
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function typeOrder(val) {
|
||||
const valueType = (0, convert_1.detectValueType)(val);
|
||||
switch (valueType) {
|
||||
case 'nullValue':
|
||||
return TypeOrder.NULL;
|
||||
case 'integerValue':
|
||||
return TypeOrder.NUMBER;
|
||||
case 'doubleValue':
|
||||
return TypeOrder.NUMBER;
|
||||
case 'stringValue':
|
||||
return TypeOrder.STRING;
|
||||
case 'booleanValue':
|
||||
return TypeOrder.BOOLEAN;
|
||||
case 'arrayValue':
|
||||
return TypeOrder.ARRAY;
|
||||
case 'timestampValue':
|
||||
return TypeOrder.TIMESTAMP;
|
||||
case 'geoPointValue':
|
||||
return TypeOrder.GEO_POINT;
|
||||
case 'bytesValue':
|
||||
return TypeOrder.BLOB;
|
||||
case 'referenceValue':
|
||||
return TypeOrder.REF;
|
||||
case 'mapValue':
|
||||
return TypeOrder.OBJECT;
|
||||
case 'vectorValue':
|
||||
return TypeOrder.VECTOR;
|
||||
default:
|
||||
throw new Error('Unexpected value type: ' + valueType);
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function primitiveComparator(left, right) {
|
||||
if (left < right) {
|
||||
return -1;
|
||||
}
|
||||
if (left > right) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
/*!
|
||||
* Utility function to compare doubles (using Firestore semantics for NaN).
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareNumbers(left, right) {
|
||||
if (left < right) {
|
||||
return -1;
|
||||
}
|
||||
if (left > right) {
|
||||
return 1;
|
||||
}
|
||||
if (left === right) {
|
||||
return 0;
|
||||
}
|
||||
// one or both are NaN.
|
||||
if (isNaN(left)) {
|
||||
return isNaN(right) ? 0 : -1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
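// For example, under these Firestore NaN semantics compareNumbers(NaN, NaN) === 0
// and compareNumbers(NaN, -Infinity) === -1, i.e. NaN sorts before every other number.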
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareNumberProtos(left, right) {
|
||||
let leftValue, rightValue;
|
||||
if (left.integerValue !== undefined) {
|
||||
leftValue = Number(left.integerValue);
|
||||
}
|
||||
else {
|
||||
leftValue = Number(left.doubleValue);
|
||||
}
|
||||
if (right.integerValue !== undefined) {
|
||||
rightValue = Number(right.integerValue);
|
||||
}
|
||||
else {
|
||||
rightValue = Number(right.doubleValue);
|
||||
}
|
||||
return compareNumbers(leftValue, rightValue);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareTimestamps(left, right) {
|
||||
const seconds = primitiveComparator(left.seconds || 0, right.seconds || 0);
|
||||
if (seconds !== 0) {
|
||||
return seconds;
|
||||
}
|
||||
return primitiveComparator(left.nanos || 0, right.nanos || 0);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareBlobs(left, right) {
|
||||
if (!(left instanceof Buffer) || !(right instanceof Buffer)) {
|
||||
throw new Error('Blobs can only be compared if they are Buffers.');
|
||||
}
|
||||
return Buffer.compare(left, right);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareReferenceProtos(left, right) {
|
||||
const leftPath = path_1.QualifiedResourcePath.fromSlashSeparatedString(left.referenceValue);
|
||||
const rightPath = path_1.QualifiedResourcePath.fromSlashSeparatedString(right.referenceValue);
|
||||
return leftPath.compareTo(rightPath);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareGeoPoints(left, right) {
|
||||
return (primitiveComparator(left.latitude || 0, right.latitude || 0) ||
|
||||
primitiveComparator(left.longitude || 0, right.longitude || 0));
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareArrays(left, right) {
|
||||
for (let i = 0; i < left.length && i < right.length; i++) {
|
||||
const valueComparison = compare(left[i], right[i]);
|
||||
if (valueComparison !== 0) {
|
||||
return valueComparison;
|
||||
}
|
||||
}
|
||||
// If all the values matched so far, just check the length.
|
||||
return primitiveComparator(left.length, right.length);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareObjects(left, right) {
|
||||
// This requires iterating over the keys in the object in order and doing a
|
||||
// deep comparison.
|
||||
const leftKeys = Object.keys(left);
|
||||
const rightKeys = Object.keys(right);
|
||||
leftKeys.sort();
|
||||
rightKeys.sort();
|
||||
for (let i = 0; i < leftKeys.length && i < rightKeys.length; i++) {
|
||||
const keyComparison = compareUtf8Strings(leftKeys[i], rightKeys[i]);
|
||||
if (keyComparison !== 0) {
|
||||
return keyComparison;
|
||||
}
|
||||
const key = leftKeys[i];
|
||||
const valueComparison = compare(left[key], right[key]);
|
||||
if (valueComparison !== 0) {
|
||||
return valueComparison;
|
||||
}
|
||||
}
|
||||
// If all the keys matched so far, just check the length.
|
||||
return primitiveComparator(leftKeys.length, rightKeys.length);
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareVectors(left, right) {
|
||||
var _a, _b, _c, _d, _e, _f;
|
||||
// The vector is a map, but only the vector's 'value' field is compared.
|
||||
const leftArray = (_c = (_b = (_a = left === null || left === void 0 ? void 0 : left['value']) === null || _a === void 0 ? void 0 : _a.arrayValue) === null || _b === void 0 ? void 0 : _b.values) !== null && _c !== void 0 ? _c : [];
|
||||
const rightArray = (_f = (_e = (_d = right === null || right === void 0 ? void 0 : right['value']) === null || _d === void 0 ? void 0 : _d.arrayValue) === null || _e === void 0 ? void 0 : _e.values) !== null && _f !== void 0 ? _f : [];
|
||||
const lengthCompare = primitiveComparator(leftArray.length, rightArray.length);
|
||||
if (lengthCompare !== 0) {
|
||||
return lengthCompare;
|
||||
}
|
||||
return compareArrays(leftArray, rightArray);
|
||||
}
|
||||
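// A vector reaches this function as the fields of a mapValue shaped roughly like
//   { __type__: { stringValue: '__vector__' }, value: { arrayValue: { values: [...] } } }
// (see the reserved keys in map-type.js); only the 'value' array takes part in
// ordering, with shorter vectors sorting before longer ones.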
/*!
|
||||
* Compare strings in UTF-8 encoded byte order
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compareUtf8Strings(left, right) {
|
||||
// Find the first differing character (a.k.a. "UTF-16 code unit") in the two strings and,
|
||||
// if found, use that character to determine the relative ordering of the two strings as a
|
||||
// whole. Comparing UTF-16 strings in UTF-8 byte order can be done simply and efficiently by
|
||||
// comparing the UTF-16 code units (chars). This serendipitously works because of the way UTF-8
|
||||
// and UTF-16 happen to represent Unicode code points.
|
||||
//
|
||||
// After finding the first pair of differing characters, there are two cases:
|
||||
//
|
||||
// Case 1: Both characters are non-surrogates (code points less than or equal to 0xFFFF) or
|
||||
// both are surrogates from a surrogate pair (that collectively represent code points greater
|
||||
// than 0xFFFF). In this case their numeric order as UTF-16 code units is the same as the
|
||||
// lexicographical order of their corresponding UTF-8 byte sequences. A direct comparison is
|
||||
// sufficient.
|
||||
//
|
||||
// Case 2: One character is a surrogate and the other is not. In this case the surrogate-
|
||||
// containing string is always ordered after the non-surrogate. This is because surrogates are
|
||||
// used to represent code points greater than 0xFFFF which have 4-byte UTF-8 representations
|
||||
// and are lexicographically greater than the 1, 2, or 3-byte representations of code points
|
||||
// less than or equal to 0xFFFF.
|
||||
//
|
||||
// An example of why Case 2 is required is comparing the following two Unicode code points:
|
||||
//
|
||||
// |-----------------------|------------|---------------------|-----------------|
|
||||
// | Name | Code Point | UTF-8 Encoding | UTF-16 Encoding |
|
||||
// |-----------------------|------------|---------------------|-----------------|
|
||||
// | Replacement Character | U+FFFD | 0xEF 0xBF 0xBD | 0xFFFD |
|
||||
// | Grinning Face | U+1F600 | 0xF0 0x9F 0x98 0x80 | 0xD83D 0xDE00 |
|
||||
// |-----------------------|------------|---------------------|-----------------|
|
||||
//
|
||||
// A lexicographical comparison of the UTF-8 encodings of these code points would order
|
||||
// "Replacement Character" _before_ "Grinning Face" because 0xEF is less than 0xF0. However, a
|
||||
// direct comparison of the UTF-16 code units, as would be done in case 1, would erroneously
|
||||
// produce the _opposite_ ordering, because 0xFFFD is _greater than_ 0xD83D. As it turns out,
|
||||
// this relative ordering holds for all comparisons of UTF-16 code points requiring a surrogate
|
||||
// pair with those that do not.
|
||||
const length = Math.min(left.length, right.length);
|
||||
for (let i = 0; i < length; i++) {
|
||||
const leftChar = left.charAt(i);
|
||||
const rightChar = right.charAt(i);
|
||||
if (leftChar !== rightChar) {
|
||||
return isSurrogate(leftChar) === isSurrogate(rightChar)
|
||||
? primitiveComparator(leftChar, rightChar)
|
||||
: isSurrogate(leftChar)
|
||||
? 1
|
||||
: -1;
|
||||
}
|
||||
}
|
||||
// Use the lengths of the strings to determine the overall comparison result since either the
|
||||
// strings were equal or one is a prefix of the other.
|
||||
return primitiveComparator(left.length, right.length);
|
||||
}
|
||||
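// For example, compareUtf8Strings('\uFFFD', '\uD83D\uDE00') returns -1: the
// Replacement Character (U+FFFD) orders before Grinning Face (U+1F600), matching
// UTF-8 byte order even though 0xFFFD is greater than 0xD83D as a UTF-16 code unit.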
const MIN_SURROGATE = 0xd800;
|
||||
const MAX_SURROGATE = 0xdfff;
|
||||
function isSurrogate(s) {
|
||||
const c = s.charCodeAt(0);
|
||||
return c >= MIN_SURROGATE && c <= MAX_SURROGATE;
|
||||
}
|
||||
/*!
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
function compare(left, right) {
|
||||
// First compare the types.
|
||||
const leftType = typeOrder(left);
|
||||
const rightType = typeOrder(right);
|
||||
const typeComparison = primitiveComparator(leftType, rightType);
|
||||
if (typeComparison !== 0) {
|
||||
return typeComparison;
|
||||
}
|
||||
// So they are the same type.
|
||||
switch (leftType) {
|
||||
case TypeOrder.NULL:
|
||||
// Nulls are all equal.
|
||||
return 0;
|
||||
case TypeOrder.BOOLEAN:
|
||||
return primitiveComparator(left.booleanValue, right.booleanValue);
|
||||
case TypeOrder.STRING:
|
||||
return compareUtf8Strings(left.stringValue, right.stringValue);
|
||||
case TypeOrder.NUMBER:
|
||||
return compareNumberProtos(left, right);
|
||||
case TypeOrder.TIMESTAMP:
|
||||
return compareTimestamps(left.timestampValue, right.timestampValue);
|
||||
case TypeOrder.BLOB:
|
||||
return compareBlobs(left.bytesValue, right.bytesValue);
|
||||
case TypeOrder.REF:
|
||||
return compareReferenceProtos(left, right);
|
||||
case TypeOrder.GEO_POINT:
|
||||
return compareGeoPoints(left.geoPointValue, right.geoPointValue);
|
||||
case TypeOrder.ARRAY:
|
||||
return compareArrays(left.arrayValue.values || [], right.arrayValue.values || []);
|
||||
case TypeOrder.OBJECT:
|
||||
return compareObjects(left.mapValue.fields || {}, right.mapValue.fields || {});
|
||||
case TypeOrder.VECTOR:
|
||||
return compareVectors(left.mapValue.fields || {}, right.mapValue.fields || {});
|
||||
default:
|
||||
throw new Error(`Encountered unknown type order: ${leftType}`);
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=order.js.map
|
||||
416
server/node_modules/@google-cloud/firestore/build/src/path.d.ts
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import api = google.firestore.v1;
|
||||
/*!
|
||||
* The default database ID for this Firestore client. We do not yet expose the
|
||||
* ability to use different databases.
|
||||
*/
|
||||
export declare const DEFAULT_DATABASE_ID = "(default)";
|
||||
/**
|
||||
* An abstract class representing a Firestore path.
|
||||
*
|
||||
* Subclasses have to implement `split()` and `canonicalString()`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
declare abstract class Path<T> {
|
||||
protected readonly segments: string[];
|
||||
/**
|
||||
* Creates a new Path with the given segments.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
* @param segments Sequence of parts of a path.
|
||||
*/
|
||||
constructor(segments: string[]);
|
||||
/**
|
||||
* Returns the number of segments of this field path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get size(): number;
|
||||
abstract construct(segments: string[] | string): T;
|
||||
abstract split(relativePath: string): string[];
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath Relative path to append to the current path.
|
||||
* @returns The new path.
|
||||
*/
|
||||
append(relativePath: Path<T> | string): T;
|
||||
/**
|
||||
* Returns the path of the parent node.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The new path or null if we are already at the root.
|
||||
*/
|
||||
parent(): T | null;
|
||||
/**
|
||||
* Checks whether the current path is a prefix of the specified path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to check against.
|
||||
* @returns 'true' iff the current path is a prefix match with 'other'.
|
||||
*/
|
||||
isPrefixOf(other: Path<T>): boolean;
|
||||
/**
|
||||
* Compare the current path against another Path object.
|
||||
*
|
||||
* Compare the current path against another Path object. Paths are compared segment by segment,
|
||||
* prioritizing numeric IDs (e.g., "__id123__") in numeric ascending order, followed by string
|
||||
* segments in lexicographical order.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to compare to.
|
||||
* @returns -1 if current < other, 1 if current > other, 0 if equal
|
||||
*/
|
||||
compareTo(other: Path<T>): number;
|
||||
private compareSegments;
|
||||
private isNumericId;
|
||||
private extractNumericId;
|
||||
private compareNumbers;
|
||||
/**
|
||||
* Returns a copy of the underlying segments.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A copy of the segments that make up this path.
|
||||
*/
|
||||
toArray(): string[];
|
||||
/**
|
||||
* Pops the last segment from this `Path` and returns a newly constructed
|
||||
* `Path`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The newly created Path.
|
||||
*/
|
||||
popLast(): T;
|
||||
/**
|
||||
* Returns true if this `Path` is equal to the provided value.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The value to compare against.
|
||||
* @return true if this `Path` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other: Path<T>): boolean;
|
||||
}
|
||||
/**
|
||||
* A slash-separated path for navigating resources within the current Firestore
|
||||
* instance.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class ResourcePath extends Path<ResourcePath> {
|
||||
/**
|
||||
* A default instance pointing to the root collection.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static EMPTY: ResourcePath;
|
||||
/**
|
||||
* Constructs a ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
*/
|
||||
constructor(...segments: string[]);
|
||||
/**
|
||||
* Indicates whether this path points to a document.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isDocument(): boolean;
|
||||
/**
|
||||
* Indicates whether this path points to a collection.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isCollection(): boolean;
|
||||
/**
|
||||
* The last component of the path.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get id(): string | null;
|
||||
/**
|
||||
* Returns the location of this path relative to the root of the project's
|
||||
* database.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get relativeName(): string;
|
||||
/**
|
||||
* Constructs a new instance of ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of parts of the path.
|
||||
* @returns The newly created ResourcePath.
|
||||
*/
|
||||
construct(segments: string[]): ResourcePath;
|
||||
/**
|
||||
* Splits a string into path segments, using slashes as separators.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath The path to split.
|
||||
* @returns The split path segments.
|
||||
*/
|
||||
split(relativePath: string): string[];
|
||||
/**
|
||||
* Converts this path to a fully qualified ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param projectId The project ID of the current Firestore project.
|
||||
* @return A fully-qualified resource path pointing to the same element.
|
||||
*/
|
||||
toQualifiedResourcePath(projectId: string, databaseId: string): QualifiedResourcePath;
|
||||
}
|
||||
/**
|
||||
* A slash-separated path that includes a project and database ID for referring
|
||||
* to resources in any Firestore project.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class QualifiedResourcePath extends ResourcePath {
|
||||
/**
|
||||
* The project ID of this path.
|
||||
*/
|
||||
readonly projectId: string;
|
||||
/**
|
||||
* The database ID of this path.
|
||||
*/
|
||||
readonly databaseId: string;
|
||||
/**
|
||||
* Constructs a Firestore Resource Path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param projectId The Firestore project id.
|
||||
* @param databaseId The Firestore database id.
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
*/
|
||||
constructor(projectId: string, databaseId: string, ...segments: string[]);
|
||||
/**
|
||||
* String representation of the path relative to the database root.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get relativeName(): string;
|
||||
/**
|
||||
* Creates a resource path from an absolute Firestore path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param absolutePath A string representation of a Resource Path.
|
||||
* @returns The new ResourcePath.
|
||||
*/
|
||||
static fromSlashSeparatedString(absolutePath: string): QualifiedResourcePath;
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath Relative path to append to the current path.
|
||||
* @returns The new path.
|
||||
*/
|
||||
append(relativePath: ResourcePath | string): QualifiedResourcePath;
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The new path.
|
||||
*/
|
||||
parent(): QualifiedResourcePath | null;
|
||||
/**
|
||||
* String representation of a ResourcePath as expected by the API.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The representation as expected by the API.
|
||||
*/
|
||||
get formattedName(): string;
|
||||
/**
|
||||
* Constructs a new instance of ResourcePath. We need this instead of using
|
||||
* the normal constructor because polymorphic 'this' doesn't work on static
|
||||
* methods.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
* @returns The newly created QualifiedResourcePath.
|
||||
*/
|
||||
construct(segments: string[]): QualifiedResourcePath;
|
||||
/**
|
||||
* Convenience method to match the ResourcePath API. This method always
|
||||
* returns the current instance.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toQualifiedResourcePath(): QualifiedResourcePath;
|
||||
/**
|
||||
* Compare the current path against another ResourcePath object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to compare to.
|
||||
* @returns -1 if current < other, 1 if current > other, 0 if equal
|
||||
*/
|
||||
compareTo(other: ResourcePath): number;
|
||||
/**
|
||||
* Converts this ResourcePath to the Firestore Proto representation.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toProto(): api.IValue;
|
||||
}
|
||||
/**
|
||||
* Validates that the given string can be used as a relative or absolute
|
||||
* resource path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param resourcePath The path to validate.
|
||||
* @throws if the string can't be used as a resource path.
|
||||
*/
|
||||
export declare function validateResourcePath(arg: string | number, resourcePath: string): void;
|
||||
/**
|
||||
* A dot-separated path for navigating sub-objects (e.g. nested maps) within a document.
|
||||
*
|
||||
* @class
|
||||
*/
|
||||
export declare class FieldPath extends Path<FieldPath> implements firestore.FieldPath {
|
||||
/**
|
||||
* A special sentinel value to refer to the ID of a document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private static _DOCUMENT_ID;
|
||||
/**
|
||||
* Constructs a Firestore Field Path.
|
||||
*
|
||||
* @param {...string} segments Sequence of field names that form this path.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
* let fieldPath = new FieldPath('f.o.o', 'bar');
|
||||
*
|
||||
* query.where(fieldPath, '==', 42).get().then(snapshot => {
|
||||
* snapshot.forEach(document => {
|
||||
* console.log(`Document contains {'f.o.o' : {'bar' : 42}}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
constructor(...segments: string[]);
|
||||
/**
|
||||
* A special FieldPath value to refer to the ID of a document. It can be used
|
||||
* in queries to sort or filter by the document ID.
|
||||
*
|
||||
* @returns {FieldPath}
|
||||
*/
|
||||
static documentId(): FieldPath;
|
||||
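// A minimal sketch of using the sentinel in a query (assumes a CollectionReference
// named `col`):
//
//   const snapshot = await col.orderBy(FieldPath.documentId()).limit(10).get();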
/**
|
||||
* Turns a field path argument into a [FieldPath]{@link FieldPath}.
|
||||
* Supports FieldPaths as input (which are passed through) and dot-separated
|
||||
* strings.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param {string|FieldPath} fieldPath The FieldPath to create.
|
||||
* @returns {FieldPath} A field path representation.
|
||||
*/
|
||||
static fromArgument(fieldPath: string | firestore.FieldPath): FieldPath;
|
||||
/**
|
||||
* String representation of a FieldPath as expected by the API.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @returns {string} The representation as expected by the API.
|
||||
*/
|
||||
get formattedName(): string;
|
||||
/**
|
||||
* Returns a string representation of this path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A string representing this path.
|
||||
*/
|
||||
toString(): string;
|
||||
/**
|
||||
* Splits a string into path segments, using dots as separators.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @param {string} fieldPath The path to split.
|
||||
* @returns {Array.<string>} - The split path segments.
|
||||
*/
|
||||
split(fieldPath: string): string[];
|
||||
/**
|
||||
* Constructs a new instance of FieldPath. We need this instead of using
|
||||
* the normal constructor because polymorphic 'this' doesn't work on static
|
||||
* methods.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @param segments Sequence of field names.
|
||||
* @returns The newly created FieldPath.
|
||||
*/
|
||||
construct(segments: string[]): FieldPath;
|
||||
/**
|
||||
* Returns true if this `FieldPath` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `FieldPath` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other: FieldPath): boolean;
|
||||
}
|
||||
/**
|
||||
* Validates that the provided value can be used as a field path argument.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param fieldPath The value to verify.
|
||||
* @throws if the string can't be used as a field path.
|
||||
*/
|
||||
export declare function validateFieldPath(arg: string | number, fieldPath: unknown): asserts fieldPath is string | FieldPath;
|
||||
export {};
|
||||
656
server/node_modules/@google-cloud/firestore/build/src/path.js
generated
vendored
Normal file
@@ -0,0 +1,656 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FieldPath = exports.QualifiedResourcePath = exports.ResourcePath = exports.DEFAULT_DATABASE_ID = void 0;
|
||||
exports.validateResourcePath = validateResourcePath;
|
||||
exports.validateFieldPath = validateFieldPath;
|
||||
const order_1 = require("./order");
|
||||
const util_1 = require("./util");
|
||||
const validate_1 = require("./validate");
|
||||
/*!
|
||||
* The default database ID for this Firestore client. We do not yet expose the
|
||||
* ability to use different databases.
|
||||
*/
|
||||
exports.DEFAULT_DATABASE_ID = '(default)';
|
||||
/*!
|
||||
* A regular expression to verify an absolute Resource Path in Firestore. It
|
||||
* extracts the project ID, the database name and the relative resource path
|
||||
* if available.
|
||||
*
|
||||
* @type {RegExp}
|
||||
*/
|
||||
const RESOURCE_PATH_RE =
|
||||
// Note: [\s\S] matches all characters including newlines.
|
||||
/^projects\/([^/]*)\/databases\/([^/]*)(?:\/documents\/)?([\s\S]*)$/;
|
||||
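// For example, a path such as 'projects/my-project/databases/(default)/documents/col/doc'
// yields the captures 'my-project', '(default)' and 'col/doc'.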
/*!
|
||||
* A regular expression to verify whether a field name can be passed to the
|
||||
* backend without escaping.
|
||||
*
|
||||
* @type {RegExp}
|
||||
*/
|
||||
const UNESCAPED_FIELD_NAME_RE = /^[_a-zA-Z][_a-zA-Z0-9]*$/;
|
||||
/*!
|
||||
* A regular expression to verify field paths that are passed to the API as
|
||||
* strings. Field paths that do not match this expression have to be provided
|
||||
* as a [FieldPath]{@link FieldPath} object.
|
||||
*
|
||||
* @type {RegExp}
|
||||
*/
|
||||
const FIELD_PATH_RE = /^[^*~/[\]]+$/;
|
||||
/**
|
||||
* An abstract class representing a Firestore path.
|
||||
*
|
||||
* Subclasses have to implement `split()` and `canonicalString()`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @class
|
||||
*/
|
||||
class Path {
|
||||
/**
|
||||
* Creates a new Path with the given segments.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @private
|
||||
* @param segments Sequence of parts of a path.
|
||||
*/
|
||||
constructor(segments) {
|
||||
this.segments = segments;
|
||||
}
|
||||
/**
|
||||
* Returns the number of segments of this field path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get size() {
|
||||
return this.segments.length;
|
||||
}
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath Relative path to append to the current path.
|
||||
* @returns The new path.
|
||||
*/
|
||||
append(relativePath) {
|
||||
if (relativePath instanceof Path) {
|
||||
return this.construct(this.segments.concat(relativePath.segments));
|
||||
}
|
||||
return this.construct(this.segments.concat(this.split(relativePath)));
|
||||
}
|
||||
/**
|
||||
* Returns the path of the parent node.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The new path or null if we are already at the root.
|
||||
*/
|
||||
parent() {
|
||||
if (this.segments.length === 0) {
|
||||
return null;
|
||||
}
|
||||
return this.construct(this.segments.slice(0, this.segments.length - 1));
|
||||
}
|
||||
/**
|
||||
* Checks whether the current path is a prefix of the specified path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to check against.
|
||||
* @returns 'true' iff the current path is a prefix match with 'other'.
|
||||
*/
|
||||
isPrefixOf(other) {
|
||||
if (other.segments.length < this.segments.length) {
|
||||
return false;
|
||||
}
|
||||
for (let i = 0; i < this.segments.length; i++) {
|
||||
if (this.segments[i] !== other.segments[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
/**
|
||||
* Compare the current path against another Path object.
|
||||
*
|
||||
* Compare the current path against another Path object. Paths are compared segment by segment,
|
||||
* prioritizing numeric IDs (e.g., "__id123__") in numeric ascending order, followed by string
|
||||
* segments in lexicographical order.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to compare to.
|
||||
* @returns -1 if current < other, 1 if current > other, 0 if equal
|
||||
*/
|
||||
compareTo(other) {
|
||||
const len = Math.min(this.segments.length, other.segments.length);
|
||||
for (let i = 0; i < len; i++) {
|
||||
const comparison = this.compareSegments(this.segments[i], other.segments[i]);
|
||||
if (comparison !== 0) {
|
||||
return comparison;
|
||||
}
|
||||
}
|
||||
return (0, order_1.primitiveComparator)(this.segments.length, other.segments.length);
|
||||
}
|
||||
compareSegments(lhs, rhs) {
|
||||
const isLhsNumeric = this.isNumericId(lhs);
|
||||
const isRhsNumeric = this.isNumericId(rhs);
|
||||
if (isLhsNumeric && !isRhsNumeric) {
|
||||
// Only lhs is numeric
|
||||
return -1;
|
||||
}
|
||||
else if (!isLhsNumeric && isRhsNumeric) {
|
||||
// Only rhs is numeric
|
||||
return 1;
|
||||
}
|
||||
else if (isLhsNumeric && isRhsNumeric) {
|
||||
// both numeric
|
||||
return this.compareNumbers(this.extractNumericId(lhs), this.extractNumericId(rhs));
|
||||
}
|
||||
else {
|
||||
// both non-numeric
|
||||
return (0, order_1.compareUtf8Strings)(lhs, rhs);
|
||||
}
|
||||
}
|
||||
// Checks if a segment is a numeric ID (starts with "__id" and ends with "__").
|
||||
isNumericId(segment) {
|
||||
return segment.startsWith('__id') && segment.endsWith('__');
|
||||
}
|
||||
// Extracts the long number from a numeric ID segment.
|
||||
extractNumericId(segment) {
|
||||
return BigInt(segment.substring(4, segment.length - 2));
|
||||
}
|
||||
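// For example, compareSegments('__id7__', '__id10__') returns -1 (numeric order,
// 7 < 10), and compareSegments('__id10__', 'users') returns -1 because numeric
// IDs always sort before plain string segments.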
compareNumbers(lhs, rhs) {
|
||||
if (lhs < rhs) {
|
||||
return -1;
|
||||
}
|
||||
else if (lhs > rhs) {
|
||||
return 1;
|
||||
}
|
||||
else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Returns a copy of the underlying segments.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A copy of the segments that make up this path.
|
||||
*/
|
||||
toArray() {
|
||||
return this.segments.slice();
|
||||
}
|
||||
/**
|
||||
* Pops the last segment from this `Path` and returns a newly constructed
|
||||
* `Path`.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The newly created Path.
|
||||
*/
|
||||
popLast() {
|
||||
this.segments.pop();
|
||||
return this.construct(this.segments);
|
||||
}
|
||||
/**
|
||||
* Returns true if this `Path` is equal to the provided value.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The value to compare against.
|
||||
* @return true if this `Path` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other) {
|
||||
return this === other || this.compareTo(other) === 0;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* A slash-separated path for navigating resources within the current Firestore
|
||||
* instance.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ResourcePath extends Path {
|
||||
/**
|
||||
* Constructs a ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
*/
|
||||
constructor(...segments) {
|
||||
super(segments);
|
||||
}
|
||||
/**
|
||||
* Indicates whether this path points to a document.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isDocument() {
|
||||
return this.segments.length > 0 && this.segments.length % 2 === 0;
|
||||
}
|
||||
/**
|
||||
* Indicates whether this path points to a collection.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get isCollection() {
|
||||
return this.segments.length % 2 === 1;
|
||||
}
|
||||
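// For example, new ResourcePath('users') is a collection path (1 segment), while
// new ResourcePath('users', 'alice') is a document path (2 segments).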
/**
|
||||
* The last component of the path.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get id() {
|
||||
if (this.segments.length > 0) {
|
||||
return this.segments[this.segments.length - 1];
|
||||
}
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
* Returns the location of this path relative to the root of the project's
|
||||
* database.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get relativeName() {
|
||||
return this.segments.join('/');
|
||||
}
|
||||
/**
|
||||
* Constructs a new instance of ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of parts of the path.
|
||||
* @returns The newly created ResourcePath.
|
||||
*/
|
||||
construct(segments) {
|
||||
return new ResourcePath(...segments);
|
||||
}
|
||||
/**
|
||||
* Splits a string into path segments, using slashes as separators.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath The path to split.
|
||||
* @returns The split path segments.
|
||||
*/
|
||||
split(relativePath) {
|
||||
// We may have an empty segment at the beginning or end if they had a
|
||||
// leading or trailing slash (which we allow).
|
||||
return relativePath.split('/').filter(segment => segment.length > 0);
|
||||
}
|
||||
/**
|
||||
* Converts this path to a fully qualified ResourcePath.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param projectId The project ID of the current Firestore project.
|
||||
* @return A fully-qualified resource path pointing to the same element.
|
||||
*/
|
||||
toQualifiedResourcePath(projectId, databaseId) {
|
||||
return new QualifiedResourcePath(projectId, databaseId, ...this.segments);
|
||||
}
|
||||
}
|
||||
exports.ResourcePath = ResourcePath;
|
||||
/**
|
||||
* A default instance pointing to the root collection.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
ResourcePath.EMPTY = new ResourcePath();
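As a quick illustration of the getters defined above (ResourcePath is @internal, so this is only a sketch of the behaviour shown in this file): an even number of segments addresses a document, an odd number a collection, and split() drops the empty segments produced by leading or trailing slashes.

const col = new ResourcePath('users');
console.log(col.isCollection, col.isDocument);          // true false
const doc = new ResourcePath('users', 'alice');
console.log(doc.isDocument, doc.id, doc.relativeName);  // true 'alice' 'users/alice'
console.log(doc.split('/users/alice/'));                // [ 'users', 'alice' ]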
|
||||
/**
|
||||
* A slash-separated path that includes a project and database ID for referring
|
||||
* to resources in any Firestore project.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class QualifiedResourcePath extends ResourcePath {
|
||||
/**
|
||||
* Constructs a Firestore Resource Path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param projectId The Firestore project id.
|
||||
* @param databaseId The Firestore database id.
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
*/
|
||||
constructor(projectId, databaseId, ...segments) {
|
||||
super(...segments);
|
||||
this.projectId = projectId;
|
||||
this.databaseId = databaseId;
|
||||
}
|
||||
/**
|
||||
* String representation of the path relative to the database root.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get relativeName() {
|
||||
return this.segments.join('/');
|
||||
}
|
||||
/**
|
||||
* Creates a resource path from an absolute Firestore path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param absolutePath A string representation of a Resource Path.
|
||||
* @returns The new ResourcePath.
|
||||
*/
|
||||
static fromSlashSeparatedString(absolutePath) {
|
||||
const elements = RESOURCE_PATH_RE.exec(absolutePath);
|
||||
if (elements) {
|
||||
const project = elements[1];
|
||||
const database = elements[2];
|
||||
const path = elements[3];
|
||||
return new QualifiedResourcePath(project, database).append(path);
|
||||
}
|
||||
throw new Error(`Resource name '${absolutePath}' is not valid.`);
|
||||
}
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param relativePath Relative path to append to the current path.
|
||||
* @returns The new path.
|
||||
*/
|
||||
append(relativePath) {
|
||||
// `super.append()` calls `QualifiedResourcePath.construct()` when invoked
|
||||
// from here and returns a QualifiedResourcePath.
|
||||
return super.append(relativePath);
|
||||
}
|
||||
/**
|
||||
* Create a child path beneath the current level.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The new path.
|
||||
*/
|
||||
parent() {
|
||||
return super.parent();
|
||||
}
|
||||
/**
|
||||
* String representation of a ResourcePath as expected by the API.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns The representation as expected by the API.
|
||||
*/
|
||||
get formattedName() {
|
||||
const components = [
|
||||
'projects',
|
||||
this.projectId,
|
||||
'databases',
|
||||
this.databaseId,
|
||||
'documents',
|
||||
...this.segments,
|
||||
];
|
||||
return components.join('/');
|
||||
}
|
||||
/**
|
||||
* Constructs a new instance of ResourcePath. We need this instead of using
|
||||
* the normal constructor because polymorphic 'this' doesn't work on static
|
||||
* methods.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param segments Sequence of names of the parts of the path.
|
||||
* @returns The newly created QualifiedResourcePath.
|
||||
*/
|
||||
construct(segments) {
|
||||
return new QualifiedResourcePath(this.projectId, this.databaseId, ...segments);
|
||||
}
|
||||
/**
|
||||
* Convenience method to match the ResourcePath API. This method always
|
||||
* returns the current instance.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toQualifiedResourcePath() {
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Compare the current path against another ResourcePath object.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param other The path to compare to.
|
||||
* @returns -1 if current < other, 1 if current > other, 0 if equal
|
||||
*/
|
||||
compareTo(other) {
|
||||
if (other instanceof QualifiedResourcePath) {
|
||||
if (this.projectId < other.projectId) {
|
||||
return -1;
|
||||
}
|
||||
if (this.projectId > other.projectId) {
|
||||
return 1;
|
||||
}
|
||||
if (this.databaseId < other.databaseId) {
|
||||
return -1;
|
||||
}
|
||||
if (this.databaseId > other.databaseId) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return super.compareTo(other);
|
||||
}
|
||||
/**
|
||||
* Converts this ResourcePath to the Firestore Proto representation.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
toProto() {
|
||||
return {
|
||||
referenceValue: this.formattedName,
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.QualifiedResourcePath = QualifiedResourcePath;
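Putting the pieces above together (again internal API, shown only as a sketch and assuming the usual '(default)' database ID): formattedName produces the fully qualified resource name the API expects, and fromSlashSeparatedString() parses one back into a path relative to the database root.

const qualified = new ResourcePath('users', 'alice')
    .toQualifiedResourcePath('my-project', '(default)');
console.log(qualified.formattedName);
// projects/my-project/databases/(default)/documents/users/alice
const parsed = QualifiedResourcePath.fromSlashSeparatedString(qualified.formattedName);
console.log(parsed.relativeName); // users/alice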
|
||||
/**
|
||||
* Validates that the given string can be used as a relative or absolute
|
||||
* resource path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param resourcePath The path to validate.
|
||||
* @throws if the string can't be used as a resource path.
|
||||
*/
|
||||
function validateResourcePath(arg, resourcePath) {
|
||||
if (typeof resourcePath !== 'string' || resourcePath === '') {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'resource path')} Path must be a non-empty string.`);
|
||||
}
|
||||
if (resourcePath.indexOf('//') >= 0) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'resource path')} Paths must not contain //.`);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* A dot-separated path for navigating sub-objects (e.g. nested maps) within a document.
|
||||
*
|
||||
* @class
|
||||
*/
|
||||
class FieldPath extends Path {
|
||||
/**
|
||||
* Constructs a Firestore Field Path.
|
||||
*
|
||||
* @param {...string} segments Sequence of field names that form this path.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let query = firestore.collection('col');
|
||||
* let fieldPath = new FieldPath('f.o.o', 'bar');
|
||||
*
|
||||
* query.where(fieldPath, '==', 42).get().then(snapshot => {
|
||||
* snapshot.forEach(document => {
|
||||
* console.log(`Document contains {'f.o.o' : {'bar' : 42}}`);
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
constructor(...segments) {
|
||||
if (Array.isArray(segments[0])) {
|
||||
throw new Error('The FieldPath constructor no longer supports an array as its first argument. ' +
|
||||
'Please unpack your array and call FieldPath() with individual arguments.');
|
||||
}
|
||||
(0, validate_1.validateMinNumberOfArguments)('FieldPath', segments, 1);
|
||||
for (let i = 0; i < segments.length; ++i) {
|
||||
(0, validate_1.validateString)(i, segments[i]);
|
||||
if (segments[i].length === 0) {
|
||||
throw new Error(`Element at index ${i} should not be an empty string.`);
|
||||
}
|
||||
}
|
||||
super(segments);
|
||||
}
|
||||
/**
|
||||
* A special FieldPath value to refer to the ID of a document. It can be used
|
||||
* in queries to sort or filter by the document ID.
|
||||
*
|
||||
* @returns {FieldPath}
|
||||
*/
|
||||
static documentId() {
|
||||
return FieldPath._DOCUMENT_ID;
|
||||
}
|
||||
/**
|
||||
* Turns a field path argument into a [FieldPath]{@link FieldPath}.
|
||||
* Supports FieldPaths as input (which are passed through) and dot-separated
|
||||
* strings.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param {string|FieldPath} fieldPath The FieldPath to create.
|
||||
* @returns {FieldPath} A field path representation.
|
||||
*/
|
||||
static fromArgument(fieldPath) {
|
||||
// validateFieldPath() is used in all public API entry points to validate
|
||||
// that fromArgument() is only called with a Field Path or a string.
|
||||
return fieldPath instanceof FieldPath
|
||||
? fieldPath
|
||||
: new FieldPath(...fieldPath.split('.'));
|
||||
}
|
||||
/**
|
||||
* String representation of a FieldPath as expected by the API.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @returns {string} The representation as expected by the API.
|
||||
*/
|
||||
get formattedName() {
|
||||
return this.segments
|
||||
.map(str => {
|
||||
return UNESCAPED_FIELD_NAME_RE.test(str)
|
||||
? str
|
||||
: '`' + str.replace(/\\/g, '\\\\').replace(/`/g, '\\`') + '`';
|
||||
})
|
||||
.join('.');
|
||||
}
|
||||
/**
|
||||
* Returns a string representation of this path.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns A string representing this path.
|
||||
*/
|
||||
toString() {
|
||||
return this.formattedName;
|
||||
}
|
||||
/**
|
||||
* Splits a string into path segments, using dots as separators.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @param {string} fieldPath The path to split.
|
||||
* @returns {Array.<string>} - The split path segments.
|
||||
*/
|
||||
split(fieldPath) {
|
||||
return fieldPath.split('.');
|
||||
}
|
||||
/**
|
||||
* Constructs a new instance of FieldPath. We need this instead of using
|
||||
* the normal constructor because polymorphic 'this' doesn't work on static
|
||||
* methods.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @override
|
||||
* @param segments Sequence of field names.
|
||||
* @returns The newly created FieldPath.
|
||||
*/
|
||||
construct(segments) {
|
||||
return new FieldPath(...segments);
|
||||
}
|
||||
/**
|
||||
* Returns true if this `FieldPath` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `FieldPath` is equal to the provided value.
|
||||
*/
|
||||
isEqual(other) {
|
||||
return super.isEqual(other);
|
||||
}
|
||||
}
|
||||
exports.FieldPath = FieldPath;
|
||||
/**
|
||||
* A special sentinel value to refer to the ID of a document.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
FieldPath._DOCUMENT_ID = new FieldPath('__name__');
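FieldPath is exported above (and re-exported at the package's top level), so its escaping behaviour can be shown directly: segments that look like plain identifiers pass through unchanged, anything else is wrapped in backticks with backslash escaping, and documentId() resolves to the __name__ sentinel defined just above. The exact identifier pattern lives in UNESCAPED_FIELD_NAME_RE, so treat the plain-identifier case as the expected outcome rather than a guarantee.

const { FieldPath } = require('@google-cloud/firestore');
console.log(new FieldPath('foo', 'bar').toString());    // foo.bar
console.log(new FieldPath('f.o.o', 'bar').toString());  // `f.o.o`.bar
console.log(FieldPath.documentId().toString());         // __name__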
|
||||
/**
|
||||
* Validates that the provided value can be used as a field path argument.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param arg The argument name or argument index (for varargs methods).
|
||||
* @param fieldPath The value to verify.
|
||||
* @throws if the string can't be used as a field path.
|
||||
*/
|
||||
function validateFieldPath(arg, fieldPath) {
|
||||
if (fieldPath instanceof FieldPath) {
|
||||
return;
|
||||
}
|
||||
if (fieldPath === undefined) {
|
||||
throw new Error((0, validate_1.invalidArgumentMessage)(arg, 'field path') + ' The path cannot be omitted.');
|
||||
}
|
||||
if ((0, util_1.isObject)(fieldPath) && fieldPath.constructor.name === 'FieldPath') {
|
||||
throw new Error((0, validate_1.customObjectMessage)(arg, fieldPath));
|
||||
}
|
||||
if (typeof fieldPath !== 'string') {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'field path')} Paths can only be specified as strings or via a FieldPath object.`);
|
||||
}
|
||||
if (fieldPath.indexOf('..') >= 0) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'field path')} Paths must not contain ".." in them.`);
|
||||
}
|
||||
if (fieldPath.startsWith('.') || fieldPath.endsWith('.')) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'field path')} Paths must not start or end with ".".`);
|
||||
}
|
||||
if (!FIELD_PATH_RE.test(fieldPath)) {
|
||||
throw new Error(`${(0, validate_1.invalidArgumentMessage)(arg, 'field path')} Paths can't be empty and must not contain
|
||||
"*~/[]".`);
|
||||
}
|
||||
}
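For reference, a few inputs and how the checks above treat them (validateFieldPath() is internal, so this is illustrative only):

// validateFieldPath('fieldPath', new FieldPath('a', 'b')) -> accepted (FieldPath passes through)
// validateFieldPath('fieldPath', 'user.address.city')     -> accepted
// validateFieldPath('fieldPath', 'a..b')                   -> throws: contains ".."
// validateFieldPath('fieldPath', '.name')                  -> throws: starts with "."
// validateFieldPath('fieldPath', 'tags[0]')                -> throws: FIELD_PATH_RE rejects "*~/[]"
// validateFieldPath('fieldPath', 42)                       -> throws: not a string or FieldPath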
|
||||
//# sourceMappingURL=path.js.map
|
||||
128
server/node_modules/@google-cloud/firestore/build/src/pool.d.ts
generated
vendored
Normal file
128
server/node_modules/@google-cloud/firestore/build/src/pool.d.ts
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
export declare const CLIENT_TERMINATED_ERROR_MSG = "The client has already been terminated";
|
||||
/**
|
||||
* An auto-resizing pool that distributes concurrent operations over multiple
|
||||
* clients of type `T`.
|
||||
*
|
||||
* ClientPool is used within Firestore to manage a pool of GAPIC clients and
|
||||
* automatically initializes multiple clients if we issue more than 100
|
||||
* concurrent operations.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class ClientPool<T> {
|
||||
private readonly concurrentOperationLimit;
|
||||
private readonly maxIdleClients;
|
||||
private readonly clientFactory;
|
||||
private readonly clientDestructor;
|
||||
private grpcEnabled;
|
||||
/**
|
||||
     * Stores each active client and how many operations it has outstanding.
*/
|
||||
private activeClients;
|
||||
/**
|
||||
* A set of clients that have seen RST_STREAM errors (see
|
||||
* https://github.com/googleapis/nodejs-firestore/issues/1023) and should
|
||||
* no longer be used.
|
||||
*/
|
||||
private failedClients;
|
||||
/**
|
||||
* Whether the Firestore instance has been terminated. Once terminated, the
|
||||
     * ClientPool can no longer schedule new operations.
*/
|
||||
private terminated;
|
||||
/**
|
||||
* Deferred promise that is resolved when there are no active operations on
|
||||
* the client pool after terminate() has been called.
|
||||
*/
|
||||
private terminateDeferred;
|
||||
/**
|
||||
* @param concurrentOperationLimit The number of operations that each client
|
||||
* can handle.
|
||||
* @param maxIdleClients The maximum number of idle clients to keep before
|
||||
* garbage collecting.
|
||||
* @param clientFactory A factory function called as needed when new clients
|
||||
* are required.
|
||||
* @param clientDestructor A cleanup function that is called when a client is
|
||||
* disposed of.
|
||||
*/
|
||||
constructor(concurrentOperationLimit: number, maxIdleClients: number, clientFactory: (requiresGrpc: boolean) => T, clientDestructor?: (client: T) => Promise<void>);
|
||||
/**
|
||||
* Returns an already existing client if it has less than the maximum number
|
||||
* of concurrent operations or initializes and returns a new client.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private acquire;
|
||||
/**
|
||||
* Reduces the number of operations for the provided client, potentially
|
||||
* removing it from the pool of active clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private release;
|
||||
/**
|
||||
* Given the current operation counts, determines if the given client should
|
||||
* be garbage collected.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private shouldGarbageCollectClient;
|
||||
/**
|
||||
* The number of currently registered clients.
|
||||
*
|
||||
* @return Number of currently registered clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get size(): number;
|
||||
/**
|
||||
* The number of currently active operations.
|
||||
*
|
||||
* @return Number of currently active operations.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get opCount(): number;
|
||||
/**
|
||||
* The currently active clients.
|
||||
*
|
||||
* @return The currently active clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get _activeClients(): Map<T, {
|
||||
activeRequestCount: number;
|
||||
grpcEnabled: boolean;
|
||||
}>;
|
||||
/**
|
||||
* Runs the provided operation in this pool. This function may create an
|
||||
* additional client if all existing clients already operate at the concurrent
|
||||
* operation limit.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this operation.
|
||||
* @param op A callback function that returns a Promise. The client T will
|
||||
* be returned to the pool when callback finishes.
|
||||
* @return A Promise that resolves with the result of `op`.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
run<V>(requestTag: string, requiresGrpc: boolean, op: (client: T) => Promise<V>): Promise<V>;
|
||||
terminate(): Promise<void>;
|
||||
}
|
||||
250
server/node_modules/@google-cloud/firestore/build/src/pool.js
generated
vendored
Normal file
250
server/node_modules/@google-cloud/firestore/build/src/pool.js
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ClientPool = exports.CLIENT_TERMINATED_ERROR_MSG = void 0;
|
||||
const assert = require("assert");
|
||||
const logger_1 = require("./logger");
|
||||
const util_1 = require("./util");
|
||||
exports.CLIENT_TERMINATED_ERROR_MSG = 'The client has already been terminated';
|
||||
/**
|
||||
* An auto-resizing pool that distributes concurrent operations over multiple
|
||||
* clients of type `T`.
|
||||
*
|
||||
* ClientPool is used within Firestore to manage a pool of GAPIC clients and
|
||||
* automatically initializes multiple clients if we issue more than 100
|
||||
* concurrent operations.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class ClientPool {
|
||||
/**
|
||||
* @param concurrentOperationLimit The number of operations that each client
|
||||
* can handle.
|
||||
* @param maxIdleClients The maximum number of idle clients to keep before
|
||||
* garbage collecting.
|
||||
* @param clientFactory A factory function called as needed when new clients
|
||||
* are required.
|
||||
* @param clientDestructor A cleanup function that is called when a client is
|
||||
* disposed of.
|
||||
*/
|
||||
constructor(concurrentOperationLimit, maxIdleClients, clientFactory, clientDestructor = () => Promise.resolve()) {
|
||||
this.concurrentOperationLimit = concurrentOperationLimit;
|
||||
this.maxIdleClients = maxIdleClients;
|
||||
this.clientFactory = clientFactory;
|
||||
this.clientDestructor = clientDestructor;
|
||||
this.grpcEnabled = false;
|
||||
/**
|
||||
         * Stores each active client and how many operations it has outstanding.
*/
|
||||
this.activeClients = new Map();
|
||||
/**
|
||||
* A set of clients that have seen RST_STREAM errors (see
|
||||
* https://github.com/googleapis/nodejs-firestore/issues/1023) and should
|
||||
* no longer be used.
|
||||
*/
|
||||
this.failedClients = new Set();
|
||||
/**
|
||||
* Whether the Firestore instance has been terminated. Once terminated, the
|
||||
         * ClientPool can no longer schedule new operations.
*/
|
||||
this.terminated = false;
|
||||
/**
|
||||
* Deferred promise that is resolved when there are no active operations on
|
||||
* the client pool after terminate() has been called.
|
||||
*/
|
||||
this.terminateDeferred = new util_1.Deferred();
|
||||
}
|
||||
/**
|
||||
* Returns an already existing client if it has less than the maximum number
|
||||
* of concurrent operations or initializes and returns a new client.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
acquire(requestTag, requiresGrpc) {
|
||||
let selectedClient = null;
|
||||
let selectedClientRequestCount = -1;
|
||||
// Transition to grpc when we see the first operation that requires grpc.
|
||||
this.grpcEnabled = this.grpcEnabled || requiresGrpc;
|
||||
// Require a grpc client for this operation if we have transitioned to grpc.
|
||||
requiresGrpc = requiresGrpc || this.grpcEnabled;
|
||||
for (const [client, metadata] of this.activeClients) {
|
||||
// Use the "most-full" client that can still accommodate the request
|
||||
// in order to maximize the number of idle clients as operations start to
|
||||
// complete.
|
||||
if (!this.failedClients.has(client) &&
|
||||
metadata.activeRequestCount > selectedClientRequestCount &&
|
||||
metadata.activeRequestCount < this.concurrentOperationLimit &&
|
||||
(metadata.grpcEnabled || !requiresGrpc)) {
|
||||
selectedClient = client;
|
||||
selectedClientRequestCount = metadata.activeRequestCount;
|
||||
}
|
||||
}
|
||||
if (selectedClient) {
|
||||
(0, logger_1.logger)('ClientPool.acquire', requestTag, 'Re-using existing client with %s remaining operations', this.concurrentOperationLimit - selectedClientRequestCount);
|
||||
}
|
||||
else {
|
||||
(0, logger_1.logger)('ClientPool.acquire', requestTag, 'Creating a new client (requiresGrpc: %s)', requiresGrpc);
|
||||
selectedClient = this.clientFactory(requiresGrpc);
|
||||
selectedClientRequestCount = 0;
|
||||
assert(!this.activeClients.has(selectedClient), 'The provided client factory returned an existing instance');
|
||||
}
|
||||
this.activeClients.set(selectedClient, {
|
||||
grpcEnabled: requiresGrpc,
|
||||
activeRequestCount: selectedClientRequestCount + 1,
|
||||
});
|
||||
return selectedClient;
|
||||
}
|
||||
/**
|
||||
* Reduces the number of operations for the provided client, potentially
|
||||
* removing it from the pool of active clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
async release(requestTag, client) {
|
||||
const metadata = this.activeClients.get(client);
|
||||
assert(metadata && metadata.activeRequestCount > 0, 'No active requests');
|
||||
this.activeClients.set(client, {
|
||||
grpcEnabled: metadata.grpcEnabled,
|
||||
activeRequestCount: metadata.activeRequestCount - 1,
|
||||
});
|
||||
if (this.terminated && this.opCount === 0) {
|
||||
this.terminateDeferred.resolve();
|
||||
}
|
||||
if (this.shouldGarbageCollectClient(client)) {
|
||||
this.activeClients.delete(client);
|
||||
this.failedClients.delete(client);
|
||||
await this.clientDestructor(client);
|
||||
(0, logger_1.logger)('ClientPool.release', requestTag, 'Garbage collected 1 client');
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Given the current operation counts, determines if the given client should
|
||||
* be garbage collected.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
    shouldGarbageCollectClient(client) {
        const clientMetadata = this.activeClients.get(client);
        if (clientMetadata.activeRequestCount !== 0) {
            // Don't garbage collect clients that have active requests.
            return false;
        }
        if (this.grpcEnabled !== clientMetadata.grpcEnabled) {
            // We are transitioning to GRPC. Garbage collect REST clients.
            return true;
        }
        // Idle clients that have received RST_STREAM errors are always garbage
        // collected.
        if (this.failedClients.has(client)) {
            return true;
        }
        // Otherwise, only garbage collect if we have too much idle capacity (e.g.
        // more than 100 idle capacity with default settings).
        let idleCapacityCount = 0;
        for (const [, metadata] of this.activeClients) {
            idleCapacityCount +=
                this.concurrentOperationLimit - metadata.activeRequestCount;
        }
        return (idleCapacityCount > this.maxIdleClients * this.concurrentOperationLimit);
    }
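A quick worked example of the idle-capacity check above, using numbers in the spirit of the "default settings" mentioned in the comment (a limit of 100 operations per client and maxIdleClients of 1; treat the concrete values as illustrative):

//   clients:        A (0 active)   B (37 active)
//   idle capacity:  (100 - 0)  +  (100 - 37)  =  163
//   threshold:      maxIdleClients * concurrentOperationLimit = 1 * 100 = 100
//
// 163 > 100, so the idle client A is garbage collected. With only client A
// registered, idle capacity is exactly 100, which does not exceed the
// threshold, so the last idle client is kept around for reuse.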
/**
|
||||
* The number of currently registered clients.
|
||||
*
|
||||
* @return Number of currently registered clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
// Visible for testing.
|
||||
get size() {
|
||||
return this.activeClients.size;
|
||||
}
|
||||
/**
|
||||
* The number of currently active operations.
|
||||
*
|
||||
* @return Number of currently active operations.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
// Visible for testing.
|
||||
get opCount() {
|
||||
let activeOperationCount = 0;
|
||||
this.activeClients.forEach(metadata => (activeOperationCount += metadata.activeRequestCount));
|
||||
return activeOperationCount;
|
||||
}
|
||||
/**
|
||||
* The currently active clients.
|
||||
*
|
||||
* @return The currently active clients.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
// Visible for testing.
|
||||
get _activeClients() {
|
||||
return this.activeClients;
|
||||
}
|
||||
/**
|
||||
* Runs the provided operation in this pool. This function may create an
|
||||
* additional client if all existing clients already operate at the concurrent
|
||||
* operation limit.
|
||||
*
|
||||
* @param requestTag A unique client-assigned identifier for this operation.
|
||||
* @param op A callback function that returns a Promise. The client T will
|
||||
* be returned to the pool when callback finishes.
|
||||
* @return A Promise that resolves with the result of `op`.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
run(requestTag, requiresGrpc, op) {
|
||||
if (this.terminated) {
|
||||
return Promise.reject(new Error(exports.CLIENT_TERMINATED_ERROR_MSG));
|
||||
}
|
||||
const client = this.acquire(requestTag, requiresGrpc);
|
||||
return op(client)
|
||||
.catch(async (err) => {
|
||||
var _a;
|
||||
if ((_a = err.message) === null || _a === void 0 ? void 0 : _a.match(/RST_STREAM/)) {
|
||||
// Once a client has seen a RST_STREAM error, the GRPC channel can
|
||||
// no longer be used. We mark the client as failed, which ensures that
|
||||
// we open a new GRPC channel for the next request.
|
||||
this.failedClients.add(client);
|
||||
}
|
||||
await this.release(requestTag, client);
|
||||
return Promise.reject(err);
|
||||
})
|
||||
.then(async (res) => {
|
||||
await this.release(requestTag, client);
|
||||
return res;
|
||||
});
|
||||
}
|
||||
async terminate() {
|
||||
this.terminated = true;
|
||||
// Wait for all pending operations to complete before terminating.
|
||||
if (this.opCount > 0) {
|
||||
(0, logger_1.logger)('ClientPool.terminate',
|
||||
/* requestTag= */ null, 'Waiting for %s pending operations to complete before terminating', this.opCount);
|
||||
await this.terminateDeferred.promise;
|
||||
}
|
||||
for (const [client] of this.activeClients) {
|
||||
this.activeClients.delete(client);
|
||||
await this.clientDestructor(client);
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.ClientPool = ClientPool;
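ClientPool is internal, but its contract is fully visible above, so a small self-contained sketch with a dummy client factory shows the intended flow. Nothing here is part of the public API, and the deep require path is an assumption made only for illustration.

// Deep require shown for illustration only; this module is internal.
const { ClientPool } = require('@google-cloud/firestore/build/src/pool');

let created = 0;
const pool = new ClientPool(
    /* concurrentOperationLimit= */ 2,
    /* maxIdleClients= */ 1,
    /* clientFactory= */ () => ({ id: ++created }),
    /* clientDestructor= */ async () => {});

const op = tag => pool.run(tag, /* requiresGrpc= */ false, async client => {
    await new Promise(resolve => setTimeout(resolve, 10));
    return client.id;
});

Promise.all([op('a'), op('b'), op('c')]).then(async ids => {
    console.log(ids);       // e.g. [ 1, 1, 2 ]: the third concurrent op forced a second client
    console.log(pool.size); // number of clients still registered after release/GC
    await pool.terminate();
});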
|
||||
//# sourceMappingURL=pool.js.map
|
||||
96
server/node_modules/@google-cloud/firestore/build/src/query-partition.d.ts
generated
vendored
Normal file
96
server/node_modules/@google-cloud/firestore/build/src/query-partition.d.ts
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import * as protos from '../protos/firestore_v1_proto_api';
|
||||
import { Query } from './reference/query';
|
||||
import { Firestore } from './index';
|
||||
import api = protos.google.firestore.v1;
|
||||
/**
|
||||
* A split point that can be used in a query as a starting and/or end point for
|
||||
* the query results. The cursors returned by {@link #startAt} and {@link
|
||||
 * #endBefore} can only be used in a query that matches the constraints of the
 * query that produced this partition.
|
||||
*
|
||||
* @class QueryPartition
|
||||
*/
|
||||
export declare class QueryPartition<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> implements firestore.QueryPartition<AppModelType, DbModelType> {
|
||||
private readonly _firestore;
|
||||
private readonly _collectionId;
|
||||
private readonly _converter;
|
||||
private readonly _startAt;
|
||||
private readonly _endBefore;
|
||||
private readonly _serializer;
|
||||
private _memoizedStartAt;
|
||||
private _memoizedEndBefore;
|
||||
/** @private */
|
||||
constructor(_firestore: Firestore, _collectionId: string, _converter: firestore.FirestoreDataConverter<AppModelType, DbModelType>, _startAt: api.IValue[] | undefined, _endBefore: api.IValue[] | undefined);
|
||||
/**
|
||||
* The cursor that defines the first result for this partition or `undefined`
|
||||
* if this is the first partition. The cursor value must be
|
||||
* destructured when passed to `startAt()` (for example with
|
||||
* `query.startAt(...queryPartition.startAt)`).
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* let partitionedQuery = query.orderBy(FieldPath.documentId());
|
||||
* if (partition.startAt) {
|
||||
* partitionedQuery = partitionedQuery.startAt(...partition.startAt);
|
||||
* }
|
||||
* if (partition.endBefore) {
|
||||
* partitionedQuery = partitionedQuery.endBefore(...partition.endBefore);
|
||||
* }
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @type {Array<*>}
|
||||
* @return {Array<*>} A cursor value that can be used with {@link
|
||||
* Query#startAt} or `undefined` if this is the first partition.
|
||||
*/
|
||||
get startAt(): unknown[] | undefined;
|
||||
/**
|
||||
* The cursor that defines the first result after this partition or
|
||||
* `undefined` if this is the last partition. The cursor value must be
|
||||
* destructured when passed to `endBefore()` (for example with
|
||||
* `query.endBefore(...queryPartition.endBefore)`).
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* let partitionedQuery = query.orderBy(FieldPath.documentId());
|
||||
* if (partition.startAt) {
|
||||
* partitionedQuery = partitionedQuery.startAt(...partition.startAt);
|
||||
* }
|
||||
* if (partition.endBefore) {
|
||||
* partitionedQuery = partitionedQuery.endBefore(...partition.endBefore);
|
||||
* }
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @type {Array<*>}
|
||||
* @return {Array<*>} A cursor value that can be used with {@link
|
||||
* Query#endBefore} or `undefined` if this is the last partition.
|
||||
*/
|
||||
get endBefore(): unknown[] | undefined;
|
||||
/**
|
||||
* Returns a query that only encapsulates the documents for this partition.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* const partitionedQuery = partition.toQuery();
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @return {Query<T>} A query partitioned by a {@link Query#startAt} and
|
||||
* {@link Query#endBefore} cursor.
|
||||
*/
|
||||
toQuery(): Query<AppModelType, DbModelType>;
|
||||
}
|
||||
144
server/node_modules/@google-cloud/firestore/build/src/query-partition.js
generated
vendored
Normal file
144
server/node_modules/@google-cloud/firestore/build/src/query-partition.js
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
"use strict";
|
||||
/*
|
||||
* Copyright 2020 Google LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.QueryPartition = void 0;
|
||||
const field_order_1 = require("./reference/field-order");
|
||||
const query_1 = require("./reference/query");
|
||||
const query_options_1 = require("./reference/query-options");
|
||||
const path_1 = require("./path");
|
||||
const serializer_1 = require("./serializer");
|
||||
/**
|
||||
* A split point that can be used in a query as a starting and/or end point for
|
||||
* the query results. The cursors returned by {@link #startAt} and {@link
|
||||
 * #endBefore} can only be used in a query that matches the constraints of the
 * query that produced this partition.
|
||||
*
|
||||
* @class QueryPartition
|
||||
*/
|
||||
class QueryPartition {
|
||||
/** @private */
|
||||
constructor(_firestore, _collectionId, _converter, _startAt, _endBefore) {
|
||||
this._firestore = _firestore;
|
||||
this._collectionId = _collectionId;
|
||||
this._converter = _converter;
|
||||
this._startAt = _startAt;
|
||||
this._endBefore = _endBefore;
|
||||
this._serializer = new serializer_1.Serializer(_firestore);
|
||||
}
|
||||
/**
|
||||
* The cursor that defines the first result for this partition or `undefined`
|
||||
* if this is the first partition. The cursor value must be
|
||||
* destructured when passed to `startAt()` (for example with
|
||||
* `query.startAt(...queryPartition.startAt)`).
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* let partitionedQuery = query.orderBy(FieldPath.documentId());
|
||||
* if (partition.startAt) {
|
||||
* partitionedQuery = partitionedQuery.startAt(...partition.startAt);
|
||||
* }
|
||||
* if (partition.endBefore) {
|
||||
* partitionedQuery = partitionedQuery.endBefore(...partition.endBefore);
|
||||
* }
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @type {Array<*>}
|
||||
* @return {Array<*>} A cursor value that can be used with {@link
|
||||
* Query#startAt} or `undefined` if this is the first partition.
|
||||
*/
|
||||
get startAt() {
|
||||
if (this._startAt && !this._memoizedStartAt) {
|
||||
this._memoizedStartAt = this._startAt.map(v => this._serializer.decodeValue(v));
|
||||
}
|
||||
return this._memoizedStartAt;
|
||||
}
|
||||
/**
|
||||
* The cursor that defines the first result after this partition or
|
||||
* `undefined` if this is the last partition. The cursor value must be
|
||||
* destructured when passed to `endBefore()` (for example with
|
||||
* `query.endBefore(...queryPartition.endBefore)`).
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* let partitionedQuery = query.orderBy(FieldPath.documentId());
|
||||
* if (partition.startAt) {
|
||||
* partitionedQuery = partitionedQuery.startAt(...partition.startAt);
|
||||
* }
|
||||
* if (partition.endBefore) {
|
||||
* partitionedQuery = partitionedQuery.endBefore(...partition.endBefore);
|
||||
* }
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @type {Array<*>}
|
||||
* @return {Array<*>} A cursor value that can be used with {@link
|
||||
* Query#endBefore} or `undefined` if this is the last partition.
|
||||
*/
|
||||
get endBefore() {
|
||||
if (this._endBefore && !this._memoizedEndBefore) {
|
||||
this._memoizedEndBefore = this._endBefore.map(v => this._serializer.decodeValue(v));
|
||||
}
|
||||
return this._memoizedEndBefore;
|
||||
}
|
||||
/**
|
||||
* Returns a query that only encapsulates the documents for this partition.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* const query = firestore.collectionGroup('collectionId');
|
||||
* for await (const partition of query.getPartitions(42)) {
|
||||
* const partitionedQuery = partition.toQuery();
|
||||
* const querySnapshot = await partitionedQuery.get();
|
||||
* console.log(`Partition contained ${querySnapshot.length} documents`);
|
||||
* }
|
||||
*
|
||||
* ```
|
||||
* @return {Query<T>} A query partitioned by a {@link Query#startAt} and
|
||||
* {@link Query#endBefore} cursor.
|
||||
*/
|
||||
toQuery() {
|
||||
// Since the api.Value to JavaScript type conversion can be lossy (unless
|
||||
// `useBigInt` is used), we pass the original protobuf representation to the
|
||||
// created query.
|
||||
let queryOptions = query_options_1.QueryOptions.forCollectionGroupQuery(this._collectionId, this._converter);
|
||||
queryOptions = queryOptions.with({
|
||||
fieldOrders: [new field_order_1.FieldOrder(path_1.FieldPath.documentId())],
|
||||
});
|
||||
if (this._startAt !== undefined) {
|
||||
queryOptions = queryOptions.with({
|
||||
startAt: { before: true, values: this._startAt },
|
||||
});
|
||||
}
|
||||
if (this._endBefore !== undefined) {
|
||||
queryOptions = queryOptions.with({
|
||||
endAt: { before: true, values: this._endBefore },
|
||||
});
|
||||
}
|
||||
return new query_1.Query(this._firestore, queryOptions);
|
||||
}
|
||||
}
|
||||
exports.QueryPartition = QueryPartition;
|
||||
//# sourceMappingURL=query-partition.js.map
|
||||
94
server/node_modules/@google-cloud/firestore/build/src/query-profile.d.ts
generated
vendored
Normal file
94
server/node_modules/@google-cloud/firestore/build/src/query-profile.d.ts
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
/*!
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { google } from '../protos/firestore_v1_proto_api';
|
||||
import { Serializer } from './serializer';
|
||||
import IPlanSummary = google.firestore.v1.IPlanSummary;
|
||||
import IExecutionStats = google.firestore.v1.IExecutionStats;
|
||||
import IExplainMetrics = google.firestore.v1.IExplainMetrics;
|
||||
/**
|
||||
* PlanSummary contains information about the planning stage of a query.
|
||||
*
|
||||
* @class PlanSummary
|
||||
*/
|
||||
export declare class PlanSummary implements firestore.PlanSummary {
|
||||
readonly indexesUsed: Record<string, unknown>[];
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(indexesUsed: Record<string, unknown>[]);
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(plan: IPlanSummary | null | undefined, serializer: Serializer): PlanSummary;
|
||||
}
|
||||
/**
|
||||
* ExecutionStats contains information about the execution of a query.
|
||||
*
|
||||
* @class ExecutionStats
|
||||
*/
|
||||
export declare class ExecutionStats implements firestore.ExecutionStats {
|
||||
readonly resultsReturned: number;
|
||||
readonly executionDuration: firestore.Duration;
|
||||
readonly readOperations: number;
|
||||
readonly debugStats: Record<string, unknown>;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(resultsReturned: number, executionDuration: firestore.Duration, readOperations: number, debugStats: Record<string, unknown>);
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(stats: IExecutionStats | null | undefined, serializer: Serializer): ExecutionStats | null;
|
||||
}
|
||||
/**
|
||||
* ExplainMetrics contains information about planning and execution of a query.
|
||||
*
|
||||
* @class ExplainMetrics
|
||||
*/
|
||||
export declare class ExplainMetrics implements firestore.ExplainMetrics {
|
||||
readonly planSummary: PlanSummary;
|
||||
readonly executionStats: ExecutionStats | null;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(planSummary: PlanSummary, executionStats: ExecutionStats | null);
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(metrics: IExplainMetrics, serializer: Serializer): ExplainMetrics;
|
||||
}
|
||||
/**
|
||||
* ExplainResults contains information about planning, execution, and results
|
||||
* of a query.
|
||||
*
|
||||
* @class ExplainResults
|
||||
*/
|
||||
export declare class ExplainResults<T> implements firestore.ExplainResults<T> {
|
||||
readonly metrics: ExplainMetrics;
|
||||
readonly snapshot: T | null;
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(metrics: ExplainMetrics, snapshot: T | null);
|
||||
}
|
||||
119
server/node_modules/@google-cloud/firestore/build/src/query-profile.js
generated
vendored
Normal file
119
server/node_modules/@google-cloud/firestore/build/src/query-profile.js
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
"use strict";
|
||||
/*!
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ExplainResults = exports.ExplainMetrics = exports.ExecutionStats = exports.PlanSummary = void 0;
|
||||
/**
|
||||
* PlanSummary contains information about the planning stage of a query.
|
||||
*
|
||||
* @class PlanSummary
|
||||
*/
|
||||
class PlanSummary {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(indexesUsed) {
|
||||
this.indexesUsed = indexesUsed;
|
||||
}
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(plan, serializer) {
|
||||
const indexes = [];
|
||||
if (plan && plan.indexesUsed) {
|
||||
for (const index of plan.indexesUsed) {
|
||||
indexes.push(serializer.decodeGoogleProtobufStruct(index));
|
||||
}
|
||||
}
|
||||
return new PlanSummary(indexes);
|
||||
}
|
||||
}
|
||||
exports.PlanSummary = PlanSummary;
|
||||
/**
|
||||
* ExecutionStats contains information about the execution of a query.
|
||||
*
|
||||
* @class ExecutionStats
|
||||
*/
|
||||
class ExecutionStats {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(resultsReturned, executionDuration, readOperations, debugStats) {
|
||||
this.resultsReturned = resultsReturned;
|
||||
this.executionDuration = executionDuration;
|
||||
this.readOperations = readOperations;
|
||||
this.debugStats = debugStats;
|
||||
}
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(stats, serializer) {
|
||||
var _a, _b;
|
||||
if (stats) {
|
||||
return new ExecutionStats(Number(stats.resultsReturned), {
|
||||
seconds: Number((_a = stats.executionDuration) === null || _a === void 0 ? void 0 : _a.seconds),
|
||||
nanoseconds: Number((_b = stats.executionDuration) === null || _b === void 0 ? void 0 : _b.nanos),
|
||||
}, Number(stats.readOperations), serializer.decodeGoogleProtobufStruct(stats.debugStats));
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
exports.ExecutionStats = ExecutionStats;
|
||||
/**
|
||||
* ExplainMetrics contains information about planning and execution of a query.
|
||||
*
|
||||
* @class ExplainMetrics
|
||||
*/
|
||||
class ExplainMetrics {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(planSummary, executionStats) {
|
||||
this.planSummary = planSummary;
|
||||
this.executionStats = executionStats;
|
||||
}
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
static _fromProto(metrics, serializer) {
|
||||
return new ExplainMetrics(PlanSummary._fromProto(metrics.planSummary, serializer), ExecutionStats._fromProto(metrics.executionStats, serializer));
|
||||
}
|
||||
}
|
||||
exports.ExplainMetrics = ExplainMetrics;
|
||||
/**
|
||||
* ExplainResults contains information about planning, execution, and results
|
||||
* of a query.
|
||||
*
|
||||
* @class ExplainResults
|
||||
*/
|
||||
class ExplainResults {
|
||||
/**
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
constructor(metrics, snapshot) {
|
||||
this.metrics = metrics;
|
||||
this.snapshot = snapshot;
|
||||
}
|
||||
}
|
||||
exports.ExplainResults = ExplainResults;
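These classes carry the results of Firestore query profiling. Assuming the public explain() entry point that they back (the entry point itself is defined elsewhere in the library, so this is a hedged sketch), the metrics surface roughly like this:

const { Firestore } = require('@google-cloud/firestore');

async function profileQuery() {
    const firestore = new Firestore();
    // explain() is assumed here as the public entry point backed by these classes.
    const results = await firestore
        .collection('cities')
        .where('population', '>', 100000)
        .explain({ analyze: true }); // with analyze: false, only plan information is returned

    const { planSummary, executionStats } = results.metrics;
    console.log(planSummary.indexesUsed);
    if (executionStats) { // null when the query was planned but not executed
        console.log(executionStats.resultsReturned, executionStats.readOperations);
    }
    console.log(results.snapshot ? results.snapshot.size : 'no snapshot');
}

profileQuery().catch(console.error);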
|
||||
//# sourceMappingURL=query-profile.js.map
|
||||
75
server/node_modules/@google-cloud/firestore/build/src/rate-limiter.d.ts
generated
vendored
Normal file
75
server/node_modules/@google-cloud/firestore/build/src/rate-limiter.d.ts
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* A helper that uses the Token Bucket algorithm to rate limit the number of
|
||||
* operations that can be made in a second.
|
||||
*
|
||||
* Before a given request containing a number of operations can proceed,
|
||||
 * RateLimiter determines whether doing so stays under the provided rate limits. It can
* also determine how much time is required before a request can be made.
|
||||
*
|
||||
* RateLimiter can also implement a gradually increasing rate limit. This is
|
||||
* used to enforce the 500/50/5 rule
|
||||
* (https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class RateLimiter {
|
||||
private readonly initialCapacity;
|
||||
private readonly multiplier;
|
||||
private readonly multiplierMillis;
|
||||
readonly maximumCapacity: number;
|
||||
private readonly startTimeMillis;
|
||||
availableTokens: number;
|
||||
lastRefillTimeMillis: number;
|
||||
previousCapacity: number;
|
||||
/**
|
||||
* @param initialCapacity Initial maximum number of operations per second.
|
||||
* @param multiplier Rate by which to increase the capacity.
|
||||
* @param multiplierMillis How often the capacity should increase in
|
||||
* milliseconds.
|
||||
* @param maximumCapacity Maximum number of allowed operations per second.
|
||||
* The number of tokens added per second will never exceed this number.
|
||||
* @param startTimeMillis The starting time in epoch milliseconds that the
|
||||
* rate limit is based on. Used for testing the limiter.
|
||||
*/
|
||||
constructor(initialCapacity: number, multiplier: number, multiplierMillis: number, maximumCapacity: number, startTimeMillis?: number);
|
||||
/**
|
||||
     * Tries to acquire capacity for the given number of operations. Returns true
     * if the request succeeded and false otherwise.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
tryMakeRequest(numOperations: number, requestTimeMillis?: number): boolean;
|
||||
/**
|
||||
* Returns the number of ms needed to make a request with the provided number
|
||||
* of operations. Returns 0 if the request can be made with the existing
|
||||
* capacity. Returns -1 if the request is not possible with the current
|
||||
* capacity.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
getNextRequestDelayMs(numOperations: number, requestTimeMillis?: number): number;
|
||||
/**
|
||||
* Refills the number of available tokens based on how much time has elapsed
|
||||
* since the last time the tokens were refilled.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private refillTokens;
|
||||
/**
|
||||
* Calculates the maximum capacity based on the provided date.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
calculateCapacity(requestTimeMillis: number): number;
|
||||
}
|
||||
139
server/node_modules/@google-cloud/firestore/build/src/rate-limiter.js
generated
vendored
Normal file
139
server/node_modules/@google-cloud/firestore/build/src/rate-limiter.js
generated
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.RateLimiter = void 0;
|
||||
/*!
|
||||
* Copyright 2020 Google LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
const assert = require("assert");
|
||||
const logger_1 = require("./logger");
|
||||
/**
|
||||
* A helper that uses the Token Bucket algorithm to rate limit the number of
|
||||
* operations that can be made in a second.
|
||||
*
|
||||
* Before a given request containing a number of operations can proceed,
|
||||
 * RateLimiter determines whether doing so stays under the provided rate limits. It can
* also determine how much time is required before a request can be made.
|
||||
*
|
||||
* RateLimiter can also implement a gradually increasing rate limit. This is
|
||||
* used to enforce the 500/50/5 rule
|
||||
* (https://firebase.google.com/docs/firestore/best-practices#ramping_up_traffic).
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class RateLimiter {
|
||||
/**
|
||||
* @param initialCapacity Initial maximum number of operations per second.
|
||||
* @param multiplier Rate by which to increase the capacity.
|
||||
* @param multiplierMillis How often the capacity should increase in
|
||||
* milliseconds.
|
||||
* @param maximumCapacity Maximum number of allowed operations per second.
|
||||
* The number of tokens added per second will never exceed this number.
|
||||
* @param startTimeMillis The starting time in epoch milliseconds that the
|
||||
* rate limit is based on. Used for testing the limiter.
|
||||
*/
|
||||
constructor(initialCapacity, multiplier, multiplierMillis, maximumCapacity, startTimeMillis = Date.now()) {
|
||||
this.initialCapacity = initialCapacity;
|
||||
this.multiplier = multiplier;
|
||||
this.multiplierMillis = multiplierMillis;
|
||||
this.maximumCapacity = maximumCapacity;
|
||||
this.startTimeMillis = startTimeMillis;
|
||||
this.availableTokens = initialCapacity;
|
||||
this.lastRefillTimeMillis = startTimeMillis;
|
||||
this.previousCapacity = initialCapacity;
|
||||
}
|
||||
/**
|
||||
     * Tries to acquire capacity for the given number of operations. Returns true
     * if the request succeeded and false otherwise.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
tryMakeRequest(numOperations, requestTimeMillis = Date.now()) {
|
||||
this.refillTokens(requestTimeMillis);
|
||||
if (numOperations <= this.availableTokens) {
|
||||
this.availableTokens -= numOperations;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Returns the number of ms needed to make a request with the provided number
|
||||
* of operations. Returns 0 if the request can be made with the existing
|
||||
* capacity. Returns -1 if the request is not possible with the current
|
||||
* capacity.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
getNextRequestDelayMs(numOperations, requestTimeMillis = Date.now()) {
|
||||
this.refillTokens(requestTimeMillis);
|
||||
if (numOperations < this.availableTokens) {
|
||||
return 0;
|
||||
}
|
||||
const capacity = this.calculateCapacity(requestTimeMillis);
|
||||
if (capacity < numOperations) {
|
||||
return -1;
|
||||
}
|
||||
const requiredTokens = numOperations - this.availableTokens;
|
||||
return Math.ceil((requiredTokens * 1000) / capacity);
|
||||
}
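A concrete walk-through of the arithmetic above, with numbers chosen only for illustration:

// With 100 tokens available, a request for 400 operations and a current
// capacity of 500 ops/s:
//   requiredTokens = 400 - 100 = 300
//   delay          = ceil(300 * 1000 / 500) = 600 ms
// If the capacity itself were below 400, the method would return -1 instead,
// signalling that the request cannot be satisfied at the current capacity.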
|
||||
/**
|
||||
* Refills the number of available tokens based on how much time has elapsed
|
||||
* since the last time the tokens were refilled.
|
||||
*
|
||||
* @param requestTimeMillis The time used to calculate the number of available
|
||||
* tokens. Used for testing the limiter.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
refillTokens(requestTimeMillis) {
|
||||
if (requestTimeMillis >= this.lastRefillTimeMillis) {
|
||||
const elapsedTime = requestTimeMillis - this.lastRefillTimeMillis;
|
||||
const capacity = this.calculateCapacity(requestTimeMillis);
|
||||
const tokensToAdd = Math.floor((elapsedTime * capacity) / 1000);
|
||||
if (tokensToAdd > 0) {
|
||||
this.availableTokens = Math.min(capacity, this.availableTokens + tokensToAdd);
|
||||
this.lastRefillTimeMillis = requestTimeMillis;
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw new Error('Request time should not be before the last token refill time.');
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Calculates the maximum capacity based on the provided date.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
    // Visible for testing.
    calculateCapacity(requestTimeMillis) {
        assert(requestTimeMillis >= this.startTimeMillis, 'startTime cannot be after currentTime');
        const millisElapsed = requestTimeMillis - this.startTimeMillis;
        const operationsPerSecond = Math.min(Math.floor(Math.pow(this.multiplier, Math.floor(millisElapsed / this.multiplierMillis)) * this.initialCapacity), this.maximumCapacity);
        if (operationsPerSecond !== this.previousCapacity) {
            (0, logger_1.logger)('RateLimiter.calculateCapacity', null, `New request capacity: ${operationsPerSecond} operations per second.`);
        }
        this.previousCapacity = operationsPerSecond;
        return operationsPerSecond;
    }
}
|
||||
exports.RateLimiter = RateLimiter;
|
||||
//# sourceMappingURL=rate-limiter.js.map
|
||||
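For context on the ramp-up that `calculateCapacity()` implements above, the sketch below evaluates the same growth formula in isolation. It is a minimal illustration, not part of the SDK; the 500 ops/s starting rate, 1.5x multiplier, 5-minute interval, and 10,000 ops/s ceiling are illustrative parameter values.
```
// capacity(t) = min(floor(initialCapacity * multiplier^floor(t / multiplierMillis)), maximumCapacity)
function capacityAt(
  elapsedMillis: number,
  initialCapacity = 500,              // illustrative starting rate (ops/s)
  multiplier = 1.5,                   // illustrative growth factor
  multiplierMillis = 5 * 60 * 1000,   // illustrative ramp-up interval
  maximumCapacity = 10_000            // illustrative ceiling
): number {
  const steps = Math.floor(elapsedMillis / multiplierMillis);
  return Math.min(
    Math.floor(initialCapacity * Math.pow(multiplier, steps)),
    maximumCapacity
  );
}

// 0, 5 and 10 minutes in: 500, 750 and 1125 operations per second.
console.log([0, 5, 10].map(minutes => capacityAt(minutes * 60 * 1000)));
```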
166
server/node_modules/@google-cloud/firestore/build/src/recursive-delete.d.ts
generated
vendored
Normal file
166
server/node_modules/@google-cloud/firestore/build/src/recursive-delete.d.ts
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
/*!
|
||||
* Copyright 2021 Google LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import Firestore, { BulkWriter } from '.';
|
||||
/*!
|
||||
* Datastore allowed numeric IDs where Firestore only allows strings. Numeric
|
||||
* IDs are exposed to Firestore as __idNUM__, so this is the lowest possible
|
||||
* negative numeric value expressed in that format.
|
||||
*
|
||||
* This constant is used to specify startAt/endAt values when querying for all
|
||||
* descendants in a single collection.
|
||||
*/
|
||||
export declare const REFERENCE_NAME_MIN_ID = "__id-9223372036854775808__";
|
||||
/*!
|
||||
* The query limit used for recursive deletes when fetching all descendants of
|
||||
* the specified reference to delete. This is done to prevent the query stream
|
||||
* from streaming documents faster than Firestore can delete.
|
||||
*/
|
||||
export declare const RECURSIVE_DELETE_MAX_PENDING_OPS = 5000;
|
||||
/*!
|
||||
* The number of pending BulkWriter operations at which RecursiveDelete
|
||||
* starts the next limit query to fetch descendants. By starting the query
|
||||
* while there are pending operations, Firestore can improve BulkWriter
|
||||
* throughput. This helps prevent BulkWriter from idling while Firestore
|
||||
* fetches the next query.
|
||||
*/
|
||||
export declare const RECURSIVE_DELETE_MIN_PENDING_OPS = 1000;
|
||||
/**
|
||||
* Class used to store state required for running a recursive delete operation.
|
||||
* Each recursive delete call should use a new instance of the class.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
export declare class RecursiveDelete {
|
||||
private readonly firestore;
|
||||
private readonly writer;
|
||||
private readonly ref;
|
||||
private readonly maxLimit;
|
||||
private readonly minLimit;
|
||||
/**
|
||||
* The number of deletes that failed with a permanent error.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private errorCount;
|
||||
/**
|
||||
* The most recently thrown error. Used to populate the developer-facing
|
||||
* error message when the recursive delete operation completes.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private lastError;
|
||||
/**
|
||||
* Whether there are still documents to delete that need to be fetched.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private documentsPending;
|
||||
/**
|
||||
* Whether run() has been called.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private started;
|
||||
/**
|
||||
* Query limit to use when fetching all descendants.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly maxPendingOps;
|
||||
/**
|
||||
* The number of pending BulkWriter operations at which RecursiveDelete
|
||||
* starts the next limit query to fetch descendants.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly minPendingOps;
|
||||
/**
|
||||
* A deferred promise that resolves when the recursive delete operation
|
||||
* is completed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private readonly completionDeferred;
|
||||
/**
|
||||
* Whether a query stream is currently in progress. Only one stream can be
|
||||
* run at a time.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private streamInProgress;
|
||||
/**
|
||||
* The last document snapshot returned by the stream. Used to set the
|
||||
* startAfter() field in the subsequent stream.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private lastDocumentSnap;
|
||||
/**
|
||||
* The number of pending BulkWriter operations. Used to determine when the
|
||||
* next query can be run.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private pendingOpsCount;
|
||||
private errorStack;
|
||||
/**
|
||||
*
|
||||
* @param firestore The Firestore instance to use.
|
||||
* @param writer The BulkWriter instance to use for delete operations.
|
||||
* @param ref The document or collection reference to recursively delete.
|
||||
* @param maxLimit The query limit to use when fetching descendants
|
||||
* @param minLimit The number of pending BulkWriter operations at which
|
||||
* RecursiveDelete starts the next limit query to fetch descendants.
|
||||
*/
|
||||
constructor(firestore: Firestore, writer: BulkWriter, ref: firestore.CollectionReference<unknown> | firestore.DocumentReference<unknown>, maxLimit: number, minLimit: number);
|
||||
/**
|
||||
* Recursively deletes the reference provided in the class constructor.
|
||||
* Returns a promise that resolves when all descendants have been deleted, or
|
||||
* if an error occurs.
|
||||
*/
|
||||
run(): Promise<void>;
|
||||
/**
|
||||
* Creates a query stream and attaches event handlers to it.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private setupStream;
|
||||
/**
|
||||
* Retrieves all descendant documents nested under the provided reference.
|
||||
* @param ref The reference to fetch all descendants for.
|
||||
* @private
|
||||
* @internal
|
||||
* @return {Stream<QueryDocumentSnapshot>} Stream of descendant documents.
|
||||
*/
|
||||
private getAllDescendants;
|
||||
/**
|
||||
* Called when all descendants of the provided reference have been streamed
|
||||
* or if a permanent error occurs during the stream. Deletes the developer
|
||||
* provided reference and wraps any errors that occurred.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private onQueryEnd;
|
||||
/**
|
||||
* Deletes the provided reference and starts the next stream if conditions
|
||||
* are met.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
private deleteRef;
|
||||
private incrementErrorCount;
|
||||
}
|
||||
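The declaration above is the internal state holder; applications normally reach it through the Firestore client's `recursiveDelete()` helper. A hedged usage sketch, assuming that helper is available in the installed SDK version (the `'users'` collection path is illustrative):
```
import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

async function deleteCollection(path: string): Promise<void> {
  // recursiveDelete() drives a RecursiveDelete instance internally and
  // resolves once every fetched descendant has been deleted.
  await firestore.recursiveDelete(firestore.collection(path));
}

deleteCollection('users').catch(err => {
  // On failure the promise rejects with the last delete error, wrapped as
  // described by the lastError/errorCount fields above.
  console.error('Recursive delete failed:', err);
});
```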
251
server/node_modules/@google-cloud/firestore/build/src/recursive-delete.js
generated
vendored
Normal file
251
server/node_modules/@google-cloud/firestore/build/src/recursive-delete.js
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.RecursiveDelete = exports.RECURSIVE_DELETE_MIN_PENDING_OPS = exports.RECURSIVE_DELETE_MAX_PENDING_OPS = exports.REFERENCE_NAME_MIN_ID = void 0;
|
||||
const assert = require("assert");
|
||||
const _1 = require(".");
|
||||
const util_1 = require("./util");
|
||||
const query_options_1 = require("./reference/query-options");
|
||||
/*!
|
||||
* Datastore allowed numeric IDs where Firestore only allows strings. Numeric
|
||||
* IDs are exposed to Firestore as __idNUM__, so this is the lowest possible
|
||||
* negative numeric value expressed in that format.
|
||||
*
|
||||
* This constant is used to specify startAt/endAt values when querying for all
|
||||
* descendants in a single collection.
|
||||
*/
|
||||
exports.REFERENCE_NAME_MIN_ID = '__id-9223372036854775808__';
|
||||
/*!
|
||||
* The query limit used for recursive deletes when fetching all descendants of
|
||||
* the specified reference to delete. This is done to prevent the query stream
|
||||
* from streaming documents faster than Firestore can delete.
|
||||
*/
|
||||
// Visible for testing.
|
||||
exports.RECURSIVE_DELETE_MAX_PENDING_OPS = 5000;
|
||||
/*!
|
||||
* The number of pending BulkWriter operations at which RecursiveDelete
|
||||
* starts the next limit query to fetch descendants. By starting the query
|
||||
* while there are pending operations, Firestore can improve BulkWriter
|
||||
* throughput. This helps prevent BulkWriter from idling while Firestore
|
||||
* fetches the next query.
|
||||
*/
|
||||
exports.RECURSIVE_DELETE_MIN_PENDING_OPS = 1000;
|
||||
/**
|
||||
* Class used to store state required for running a recursive delete operation.
|
||||
* Each recursive delete call should use a new instance of the class.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
class RecursiveDelete {
|
||||
/**
|
||||
*
|
||||
* @param firestore The Firestore instance to use.
|
||||
* @param writer The BulkWriter instance to use for delete operations.
|
||||
* @param ref The document or collection reference to recursively delete.
|
||||
* @param maxLimit The query limit to use when fetching descendants
|
||||
* @param minLimit The number of pending BulkWriter operations at which
|
||||
* RecursiveDelete starts the next limit query to fetch descendants.
|
||||
*/
|
||||
constructor(firestore, writer, ref, maxLimit, minLimit) {
|
||||
this.firestore = firestore;
|
||||
this.writer = writer;
|
||||
this.ref = ref;
|
||||
this.maxLimit = maxLimit;
|
||||
this.minLimit = minLimit;
|
||||
/**
|
||||
* The number of deletes that failed with a permanent error.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.errorCount = 0;
|
||||
/**
|
||||
* Whether there are still documents to delete that need to be fetched.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.documentsPending = true;
|
||||
/**
|
||||
* Whether run() has been called.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.started = false;
|
||||
/**
|
||||
* A deferred promise that resolves when the recursive delete operation
|
||||
* is completed.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.completionDeferred = new util_1.Deferred();
|
||||
/**
|
||||
* Whether a query stream is currently in progress. Only one stream can be
|
||||
* run at a time.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.streamInProgress = false;
|
||||
/**
|
||||
* The number of pending BulkWriter operations. Used to determine when the
|
||||
* next query can be run.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
this.pendingOpsCount = 0;
|
||||
this.errorStack = '';
|
||||
this.maxPendingOps = maxLimit;
|
||||
this.minPendingOps = minLimit;
|
||||
}
|
||||
/**
|
||||
* Recursively deletes the reference provided in the class constructor.
|
||||
* Returns a promise that resolves when all descendants have been deleted, or
|
||||
* if an error occurs.
|
||||
*/
|
||||
run() {
|
||||
assert(!this.started, 'RecursiveDelete.run() should only be called once.');
|
||||
// Capture the error stack to preserve stack tracing across async calls.
|
||||
this.errorStack = Error().stack;
|
||||
this.writer._verifyNotClosed();
|
||||
this.setupStream();
|
||||
return this.completionDeferred.promise;
|
||||
}
|
||||
/**
|
||||
* Creates a query stream and attaches event handlers to it.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
setupStream() {
|
||||
const stream = this.getAllDescendants(this.ref instanceof _1.CollectionReference
|
||||
? this.ref
|
||||
: this.ref);
|
||||
this.streamInProgress = true;
|
||||
let streamedDocsCount = 0;
|
||||
stream
|
||||
.on('error', err => {
|
||||
err.code = 14 /* StatusCode.UNAVAILABLE */;
|
||||
err.stack = 'Failed to fetch children documents: ' + err.stack;
|
||||
this.lastError = err;
|
||||
this.onQueryEnd();
|
||||
})
|
||||
.on('data', (snap) => {
|
||||
streamedDocsCount++;
|
||||
this.lastDocumentSnap = snap;
|
||||
this.deleteRef(snap.ref);
|
||||
})
|
||||
.on('end', () => {
|
||||
this.streamInProgress = false;
|
||||
// If there are fewer than the number of documents specified in the
|
||||
// limit() field, we know that the query is complete.
|
||||
if (streamedDocsCount < this.minPendingOps) {
|
||||
this.onQueryEnd();
|
||||
}
|
||||
else if (this.pendingOpsCount === 0) {
|
||||
this.setupStream();
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Retrieves all descendant documents nested under the provided reference.
|
||||
* @param ref The reference to fetch all descendants for.
|
||||
* @private
|
||||
* @internal
|
||||
* @return {Stream<QueryDocumentSnapshot>} Stream of descendant documents.
|
||||
*/
|
||||
getAllDescendants(ref) {
|
||||
// The parent is the closest ancestor document to the location we're
|
||||
// deleting. If we are deleting a document, the parent is the path of that
|
||||
// document. If we are deleting a collection, the parent is the path of the
|
||||
// document containing that collection (or the database root, if it is a
|
||||
// root collection).
|
||||
let parentPath = ref._resourcePath;
|
||||
if (ref instanceof _1.CollectionReference) {
|
||||
parentPath = parentPath.popLast();
|
||||
}
|
||||
const collectionId = ref instanceof _1.CollectionReference
|
||||
? ref.id
|
||||
: ref.parent.id;
|
||||
let query = new _1.Query(this.firestore, query_options_1.QueryOptions.forKindlessAllDescendants(parentPath, collectionId,
|
||||
/* requireConsistency= */ false));
|
||||
// Query for names only to fetch empty snapshots.
|
||||
query = query.select(_1.FieldPath.documentId()).limit(this.maxPendingOps);
|
||||
if (ref instanceof _1.CollectionReference) {
|
||||
// To find all descendants of a collection reference, we need to use a
|
||||
// composite filter that captures all documents that start with the
|
||||
// collection prefix. The MIN_KEY constant represents the minimum key in
|
||||
// this collection, and a null byte + the MIN_KEY represents the minimum
|
||||
// key in the next possible collection.
|
||||
const nullChar = String.fromCharCode(0);
|
||||
const startAt = collectionId + '/' + exports.REFERENCE_NAME_MIN_ID;
|
||||
const endAt = collectionId + nullChar + '/' + exports.REFERENCE_NAME_MIN_ID;
|
||||
query = query
|
||||
.where(_1.FieldPath.documentId(), '>=', startAt)
|
||||
.where(_1.FieldPath.documentId(), '<', endAt);
|
||||
}
|
||||
if (this.lastDocumentSnap) {
|
||||
query = query.startAfter(this.lastDocumentSnap);
|
||||
}
|
||||
return query.stream();
|
||||
}
|
||||
/**
|
||||
* Called when all descendants of the provided reference have been streamed
|
||||
* or if a permanent error occurs during the stream. Deletes the developer
|
||||
* provided reference and wraps any errors that occurred.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
onQueryEnd() {
|
||||
this.documentsPending = false;
|
||||
if (this.ref instanceof _1.DocumentReference) {
|
||||
this.writer.delete(this.ref).catch(err => this.incrementErrorCount(err));
|
||||
}
|
||||
this.writer.flush().then(async () => {
|
||||
var _a;
|
||||
if (this.lastError === undefined) {
|
||||
this.completionDeferred.resolve();
|
||||
}
|
||||
else {
|
||||
let error = new (require('google-gax/build/src/fallback').GoogleError)(`${this.errorCount} ` +
|
||||
`${this.errorCount !== 1 ? 'deletes' : 'delete'} ` +
|
||||
'failed. The last delete failed with: ');
|
||||
if (this.lastError.code !== undefined) {
|
||||
error.code = this.lastError.code;
|
||||
}
|
||||
error = (0, util_1.wrapError)(error, this.errorStack);
|
||||
// Wrap the BulkWriter error last to provide the full stack trace.
|
||||
this.completionDeferred.reject(this.lastError.stack
|
||||
? (0, util_1.wrapError)(error, (_a = this.lastError.stack) !== null && _a !== void 0 ? _a : '')
|
||||
: error);
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Deletes the provided reference and starts the next stream if conditions
|
||||
* are met.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
deleteRef(docRef) {
|
||||
this.pendingOpsCount++;
|
||||
this.writer
|
||||
.delete(docRef)
|
||||
.catch(err => {
|
||||
this.incrementErrorCount(err);
|
||||
})
|
||||
.then(() => {
|
||||
this.pendingOpsCount--;
|
||||
// We wait until the previous stream has ended in order to ensure the
|
||||
// startAfter document is correct. Starting the next stream while
|
||||
// there are pending operations allows Firestore to maximize
|
||||
// BulkWriter throughput.
|
||||
if (this.documentsPending &&
|
||||
!this.streamInProgress &&
|
||||
this.pendingOpsCount < this.minPendingOps) {
|
||||
this.setupStream();
|
||||
}
|
||||
});
|
||||
}
|
||||
incrementErrorCount(err) {
|
||||
this.errorCount++;
|
||||
this.lastError = err;
|
||||
}
|
||||
}
|
||||
exports.RecursiveDelete = RecursiveDelete;
|
||||
//# sourceMappingURL=recursive-delete.js.map
|
||||
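To make the kindless descendant query in `getAllDescendants()` concrete, the sketch below reproduces just the `startAt`/`endAt` bound computation for a collection id; the `'rooms'` id is illustrative.
```
const REFERENCE_NAME_MIN_ID = '__id-9223372036854775808__';

// Bounds used when streaming all descendants of a collection reference:
// everything from the smallest name under the collection itself up to (but
// excluding) the smallest name under the next possible collection id.
function descendantBounds(collectionId: string): { startAt: string; endAt: string } {
  const nullChar = String.fromCharCode(0);
  return {
    startAt: collectionId + '/' + REFERENCE_NAME_MIN_ID,
    endAt: collectionId + nullChar + '/' + REFERENCE_NAME_MIN_ID,
  };
}

console.log(descendantBounds('rooms'));
// { startAt: 'rooms/__id-9223372036854775808__',
//   endAt: 'rooms\u0000/__id-9223372036854775808__' }
```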
63
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query-snapshot.d.ts
generated
vendored
Normal file
63
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query-snapshot.d.ts
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { Timestamp } from '../timestamp';
|
||||
import { AggregateQuery } from './aggregate-query';
|
||||
/**
|
||||
* The results of executing an aggregation query.
|
||||
*/
|
||||
export declare class AggregateQuerySnapshot<AggregateSpecType extends firestore.AggregateSpec, AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> implements firestore.AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType> {
|
||||
private readonly _query;
|
||||
private readonly _readTime;
|
||||
private readonly _data;
|
||||
/**
|
||||
* @internal
|
||||
*
|
||||
* @param _query The query that was executed to produce this result.
|
||||
* @param _readTime The time this snapshot was read.
|
||||
* @param _data The results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*/
|
||||
constructor(_query: AggregateQuery<AggregateSpecType, AppModelType, DbModelType>, _readTime: Timestamp, _data: firestore.AggregateSpecData<AggregateSpecType>);
|
||||
/** The query that was executed to produce this result. */
|
||||
get query(): AggregateQuery<AggregateSpecType, AppModelType, DbModelType>;
|
||||
/** The time this snapshot was read. */
|
||||
get readTime(): Timestamp;
|
||||
/**
|
||||
* Returns the results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*
|
||||
* The keys of the returned object will be the same as those of the
|
||||
* `AggregateSpec` object specified to the aggregation method, and the
|
||||
* values will be the corresponding aggregation result.
|
||||
*
|
||||
* @returns The results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*/
|
||||
data(): firestore.AggregateSpecData<AggregateSpecType>;
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* Two `AggregateQuerySnapshot` instances are considered "equal" if they
|
||||
* have the same data and their underlying queries compare "equal" using
|
||||
* `AggregateQuery.isEqual()`.
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other: firestore.AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType>): boolean;
|
||||
}
|
||||
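As a usage sketch for the snapshot type declared above: a count aggregation resolves to an `AggregateQuerySnapshot` whose `data()` keys mirror the aggregate spec. The `'cities'` collection is illustrative, and the example assumes the `count()` helper exposed by `Query` in recent SDK versions.
```
import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

async function countCities(): Promise<number> {
  // get() resolves to an AggregateQuerySnapshot; data() returns the
  // aggregation results keyed by the spec's aliases ('count' here).
  const snapshot = await firestore.collection('cities').count().get();
  return snapshot.data().count;
}
```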
87
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query-snapshot.js
generated
vendored
Normal file
87
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query-snapshot.js
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AggregateQuerySnapshot = void 0;
|
||||
const deepEqual = require("fast-deep-equal");
|
||||
/**
|
||||
* The results of executing an aggregation query.
|
||||
*/
|
||||
class AggregateQuerySnapshot {
|
||||
/**
|
||||
* @internal
|
||||
*
|
||||
* @param _query The query that was executed to produce this result.
|
||||
* @param _readTime The time this snapshot was read.
|
||||
* @param _data The results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*/
|
||||
constructor(_query, _readTime, _data) {
|
||||
this._query = _query;
|
||||
this._readTime = _readTime;
|
||||
this._data = _data;
|
||||
}
|
||||
/** The query that was executed to produce this result. */
|
||||
get query() {
|
||||
return this._query;
|
||||
}
|
||||
/** The time this snapshot was read. */
|
||||
get readTime() {
|
||||
return this._readTime;
|
||||
}
|
||||
/**
|
||||
* Returns the results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*
|
||||
* The keys of the returned object will be the same as those of the
|
||||
* `AggregateSpec` object specified to the aggregation method, and the
|
||||
* values will be the corresponding aggregation result.
|
||||
*
|
||||
* @returns The results of the aggregations performed over the underlying
|
||||
* query.
|
||||
*/
|
||||
data() {
|
||||
return this._data;
|
||||
}
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* Two `AggregateQuerySnapshot` instances are considered "equal" if they
|
||||
* have the same data and their underlying queries compare "equal" using
|
||||
* `AggregateQuery.isEqual()`.
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other) {
|
||||
if (this === other) {
|
||||
return true;
|
||||
}
|
||||
if (!(other instanceof AggregateQuerySnapshot)) {
|
||||
return false;
|
||||
}
|
||||
// Since the read time is different on every read, we explicitly ignore all
|
||||
// document metadata in this comparison, just like
|
||||
// `DocumentSnapshot.isEqual()` does.
|
||||
if (!this.query.isEqual(other.query)) {
|
||||
return false;
|
||||
}
|
||||
return deepEqual(this._data, other._data);
|
||||
}
|
||||
}
|
||||
exports.AggregateQuerySnapshot = AggregateQuerySnapshot;
|
||||
//# sourceMappingURL=aggregate-query-snapshot.js.map
|
||||
119
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query.d.ts
generated
vendored
Normal file
119
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query.d.ts
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
/**
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as protos from '../../protos/firestore_v1_proto_api';
|
||||
import api = protos.google.firestore.v1;
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { AggregateSpec } from '../aggregate';
|
||||
import { Timestamp } from '../timestamp';
|
||||
import { ExplainResults } from '../query-profile';
|
||||
import { AggregateQuerySnapshot } from './aggregate-query-snapshot';
|
||||
import { Query } from './query';
|
||||
import { Readable } from 'stream';
|
||||
import { QueryResponse, QuerySnapshotResponse } from './types';
|
||||
/**
|
||||
* A query that calculates aggregations over an underlying query.
|
||||
*/
|
||||
export declare class AggregateQuery<AggregateSpecType extends AggregateSpec, AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> implements firestore.AggregateQuery<AggregateSpecType, AppModelType, DbModelType> {
|
||||
private readonly _query;
|
||||
private readonly _aggregates;
|
||||
private readonly clientAliasToServerAliasMap;
|
||||
private readonly serverAliasToClientAliasMap;
|
||||
/**
|
||||
* @internal
|
||||
* @param _query The query whose aggregations will be calculated by this
|
||||
* object.
|
||||
* @param _aggregates The aggregations that will be performed by this query.
|
||||
*/
|
||||
constructor(_query: Query<AppModelType, DbModelType>, _aggregates: AggregateSpecType);
|
||||
/** The query whose aggregations will be calculated by this object. */
|
||||
get query(): Query<AppModelType, DbModelType>;
|
||||
/**
|
||||
* Executes this query.
|
||||
*
|
||||
* @return A promise that will be resolved with the results of the query.
|
||||
*/
|
||||
get(): Promise<AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType>>;
|
||||
/**
|
||||
* Internal get() method that accepts an optional transaction options and
|
||||
* returns a snapshot with transaction and explain metadata.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
*/
|
||||
_get(transactionOrReadTime?: Uint8Array | Timestamp | api.ITransactionOptions): Promise<QuerySnapshotResponse<AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType>>>;
|
||||
/**
|
||||
* Internal get() method that accepts an optional transaction id, and returns
|
||||
* transaction metadata.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
*/
|
||||
_getResponse(transactionOrReadTime?: Uint8Array | Timestamp | api.ITransactionOptions, explainOptions?: firestore.ExplainOptions): Promise<QueryResponse<AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType>>>;
|
||||
/**
|
||||
* Internal streaming method that accepts an optional transaction ID.
|
||||
*
|
||||
* BEWARE: If `transactionOrReadTime` is `ITransactionOptions`, then the first
|
||||
* response in the stream will be a transaction response.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
* @param explainOptions Options to use for explaining the query (if any).
|
||||
* @returns A stream of document results optionally preceded by a transaction response.
|
||||
*/
|
||||
_stream(transactionOrReadTime?: Uint8Array | Timestamp | api.ITransactionOptions, explainOptions?: firestore.ExplainOptions): Readable;
|
||||
/**
|
||||
* Internal method to decode values within result.
|
||||
* @private
|
||||
*/
|
||||
private decodeResult;
|
||||
/**
|
||||
* Internal method for serializing a query to its RunAggregationQuery proto
|
||||
* representation with an optional transaction id.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns Serialized JSON for the query.
|
||||
*/
|
||||
toProto(transactionOrReadTime?: Uint8Array | Timestamp | api.ITransactionOptions, explainOptions?: firestore.ExplainOptions): api.IRunAggregationQueryRequest;
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* This object is considered "equal" to the other object if and only if
|
||||
* `other` performs the same aggregations as this `AggregateQuery` and
|
||||
* the underlying Query of `other` compares equal to that of this object
|
||||
* using `Query.isEqual()`.
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other: firestore.AggregateQuery<AggregateSpecType, AppModelType, DbModelType>): boolean;
|
||||
/**
|
||||
* Plans and optionally executes this query. Returns a Promise that will be
|
||||
* resolved with the planner information, statistics from the query
|
||||
* execution (if any), and the query results (if any).
|
||||
*
|
||||
* @return A Promise that will be resolved with the planner information,
|
||||
* statistics from the query execution (if any), and the query results (if any).
|
||||
*/
|
||||
explain(options?: firestore.ExplainOptions): Promise<ExplainResults<AggregateQuerySnapshot<AggregateSpecType, AppModelType, DbModelType>>>;
|
||||
}
|
||||
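A hedged sketch of building and running an `AggregateQuery` through the public surface, assuming the `aggregate()` method and `AggregateField` helpers present in recent SDK versions; the collection, field name, and aliases are illustrative.
```
import { Firestore, AggregateField } from '@google-cloud/firestore';

const firestore = new Firestore();

async function populationStats() {
  // The spec keys ('total', 'averagePopulation') become the keys returned
  // by snapshot.data(); internally they are mapped to short server aliases.
  const query = firestore.collection('cities').aggregate({
    total: AggregateField.count(),
    averagePopulation: AggregateField.average('population'),
  });
  const snapshot = await query.get();
  return snapshot.data();
}
```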
291
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query.js
generated
vendored
Normal file
291
server/node_modules/@google-cloud/firestore/build/src/reference/aggregate-query.js
generated
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AggregateQuery = void 0;
|
||||
const assert = require("assert");
|
||||
const deepEqual = require("fast-deep-equal");
|
||||
const aggregate_1 = require("../aggregate");
|
||||
const timestamp_1 = require("../timestamp");
|
||||
const util_1 = require("../util");
|
||||
const query_profile_1 = require("../query-profile");
|
||||
const logger_1 = require("../logger");
|
||||
const aggregate_query_snapshot_1 = require("./aggregate-query-snapshot");
|
||||
const stream_1 = require("stream");
|
||||
const trace_util_1 = require("../telemetry/trace-util");
|
||||
/**
|
||||
* A query that calculates aggregations over an underlying query.
|
||||
*/
|
||||
class AggregateQuery {
|
||||
/**
|
||||
* @internal
|
||||
* @param _query The query whose aggregations will be calculated by this
|
||||
* object.
|
||||
* @param _aggregates The aggregations that will be performed by this query.
|
||||
*/
|
||||
constructor(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
_query, _aggregates) {
|
||||
this._query = _query;
|
||||
this._aggregates = _aggregates;
|
||||
this.clientAliasToServerAliasMap = {};
|
||||
this.serverAliasToClientAliasMap = {};
|
||||
// Client-side aliases may be too long and exceed the 1500-byte string size limit.
|
||||
// Such long strings do not need to be transferred over the wire either.
|
||||
// The client maps the user's alias to a short form alias and sends that to the server.
|
||||
let aggregationNum = 0;
|
||||
for (const clientAlias in this._aggregates) {
|
||||
if (Object.prototype.hasOwnProperty.call(this._aggregates, clientAlias)) {
|
||||
const serverAlias = `aggregate_${aggregationNum++}`;
|
||||
this.clientAliasToServerAliasMap[clientAlias] = serverAlias;
|
||||
this.serverAliasToClientAliasMap[serverAlias] = clientAlias;
|
||||
}
|
||||
}
|
||||
}
|
||||
/** The query whose aggregations will be calculated by this object. */
|
||||
get query() {
|
||||
return this._query;
|
||||
}
|
||||
/**
|
||||
* Executes this query.
|
||||
*
|
||||
* @return A promise that will be resolved with the results of the query.
|
||||
*/
|
||||
async get() {
|
||||
return this._query._firestore._traceUtil.startActiveSpan(trace_util_1.SPAN_NAME_AGGREGATION_QUERY_GET, async () => {
|
||||
const { result } = await this._get();
|
||||
return result;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Internal get() method that accepts an optional transaction options and
|
||||
* returns a snapshot with transaction and explain metadata.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
*/
|
||||
async _get(transactionOrReadTime) {
|
||||
const response = await this._getResponse(transactionOrReadTime);
|
||||
if (!response.result) {
|
||||
throw new Error('No AggregateQuery results');
|
||||
}
|
||||
return response;
|
||||
}
|
||||
/**
|
||||
* Internal get() method that accepts an optional transaction id, and returns
|
||||
* transaction metadata.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
*/
|
||||
_getResponse(transactionOrReadTime, explainOptions) {
|
||||
// Capture the error stack to preserve stack tracing across async calls.
|
||||
const stack = Error().stack;
|
||||
return new Promise((resolve, reject) => {
|
||||
const output = {};
|
||||
const stream = this._stream(transactionOrReadTime, explainOptions);
|
||||
stream.on('error', err => {
|
||||
reject((0, util_1.wrapError)(err, stack));
|
||||
});
|
||||
stream.on('data', (data) => {
|
||||
if (data.transaction) {
|
||||
output.transaction = data.transaction;
|
||||
}
|
||||
if (data.explainMetrics) {
|
||||
output.explainMetrics = data.explainMetrics;
|
||||
}
|
||||
if (data.result) {
|
||||
output.result = data.result;
|
||||
}
|
||||
});
|
||||
stream.on('end', () => {
|
||||
stream.destroy();
|
||||
resolve(output);
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Internal streaming method that accepts an optional transaction ID.
|
||||
*
|
||||
* BEWARE: If `transactionOrReadTime` is `ITransactionOptions`, then the first
|
||||
* response in the stream will be a transaction response.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @param transactionOrReadTime A transaction ID, options to start a new
|
||||
* transaction, or timestamp to use as read time.
|
||||
* @param explainOptions Options to use for explaining the query (if any).
|
||||
* @returns A stream of document results optionally preceded by a transaction response.
|
||||
*/
|
||||
_stream(transactionOrReadTime, explainOptions) {
|
||||
const tag = (0, util_1.requestTag)();
|
||||
const firestore = this._query.firestore;
|
||||
const stream = new stream_1.Transform({
|
||||
objectMode: true,
|
||||
transform: (proto, enc, callback) => {
|
||||
var _a;
|
||||
const output = {};
|
||||
// Proto comes with zero-length buffer by default
|
||||
if ((_a = proto.transaction) === null || _a === void 0 ? void 0 : _a.length) {
|
||||
output.transaction = proto.transaction;
|
||||
}
|
||||
if (proto.explainMetrics) {
|
||||
output.explainMetrics = query_profile_1.ExplainMetrics._fromProto(proto.explainMetrics, firestore._serializer);
|
||||
}
|
||||
if (proto.result) {
|
||||
const readTime = timestamp_1.Timestamp.fromProto(proto.readTime);
|
||||
const data = this.decodeResult(proto.result);
|
||||
output.result = new aggregate_query_snapshot_1.AggregateQuerySnapshot(this, readTime, data);
|
||||
}
|
||||
callback(undefined, output);
|
||||
},
|
||||
});
|
||||
firestore
|
||||
.initializeIfNeeded(tag)
|
||||
.then(async () => {
|
||||
// `toProto()` might throw an exception. We rely on the behavior of an
|
||||
// async function to convert this exception into the rejected Promise we
|
||||
// catch below.
|
||||
const request = this.toProto(transactionOrReadTime, explainOptions);
|
||||
const backendStream = await firestore.requestStream('runAggregationQuery',
|
||||
/* bidirectional= */ false, request, tag);
|
||||
stream.on('close', () => {
|
||||
backendStream.resume();
|
||||
backendStream.end();
|
||||
});
|
||||
backendStream.on('error', err => {
|
||||
// TODO(group-by) When group-by queries are supported for aggregates
|
||||
// consider implementing retries if the stream is making progress
|
||||
// receiving results for groups. See the use of lastReceivedDocument
|
||||
// in the retry strategy for runQuery.
|
||||
// Also note that explain queries should not be retried.
|
||||
backendStream.unpipe(stream);
|
||||
(0, logger_1.logger)('AggregateQuery._stream', tag, 'AggregateQuery failed with stream error:', err);
|
||||
this._query._firestore._traceUtil
|
||||
.currentSpan()
|
||||
.addEvent(`${trace_util_1.SPAN_NAME_RUN_AGGREGATION_QUERY}: Error.`, {
|
||||
'error.message': err.message,
|
||||
});
|
||||
stream.destroy(err);
|
||||
});
|
||||
backendStream.resume();
|
||||
backendStream.pipe(stream);
|
||||
})
|
||||
.catch(e => stream.destroy(e));
|
||||
return stream;
|
||||
}
|
||||
/**
|
||||
* Internal method to decode values within result.
|
||||
* @private
|
||||
*/
|
||||
decodeResult(proto) {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const data = {};
|
||||
const fields = proto.aggregateFields;
|
||||
if (fields) {
|
||||
const serializer = this._query.firestore._serializer;
|
||||
for (const prop of Object.keys(fields)) {
|
||||
const alias = this.serverAliasToClientAliasMap[prop];
|
||||
assert(alias !== null && alias !== undefined, `'${prop}' not present in server-client alias mapping.`);
|
||||
if (this._aggregates[alias] === undefined) {
|
||||
throw new Error(`Unexpected alias [${prop}] in result aggregate result`);
|
||||
}
|
||||
data[alias] = serializer.decodeValue(fields[prop]);
|
||||
}
|
||||
}
|
||||
return data;
|
||||
}
|
||||
/**
|
||||
* Internal method for serializing a query to its RunAggregationQuery proto
|
||||
* representation with an optional transaction id.
|
||||
*
|
||||
* @private
|
||||
* @internal
|
||||
* @returns Serialized JSON for the query.
|
||||
*/
|
||||
toProto(transactionOrReadTime, explainOptions) {
|
||||
const queryProto = this._query.toProto();
|
||||
const runQueryRequest = {
|
||||
parent: queryProto.parent,
|
||||
structuredAggregationQuery: {
|
||||
structuredQuery: queryProto.structuredQuery,
|
||||
aggregations: (0, util_1.mapToArray)(this._aggregates, (aggregate, clientAlias) => {
|
||||
const serverAlias = this.clientAliasToServerAliasMap[clientAlias];
|
||||
assert(serverAlias !== null && serverAlias !== undefined, `'${clientAlias}' not present in client-server alias mapping.`);
|
||||
return new aggregate_1.Aggregate(serverAlias, aggregate.aggregateType, aggregate._field).toProto();
|
||||
}),
|
||||
},
|
||||
};
|
||||
if (transactionOrReadTime instanceof Uint8Array) {
|
||||
runQueryRequest.transaction = transactionOrReadTime;
|
||||
}
|
||||
else if (transactionOrReadTime instanceof timestamp_1.Timestamp) {
|
||||
runQueryRequest.readTime = transactionOrReadTime.toProto().timestampValue;
|
||||
}
|
||||
else if (transactionOrReadTime) {
|
||||
runQueryRequest.newTransaction = transactionOrReadTime;
|
||||
}
|
||||
if (explainOptions) {
|
||||
runQueryRequest.explainOptions = explainOptions;
|
||||
}
|
||||
return runQueryRequest;
|
||||
}
|
||||
/**
|
||||
* Compares this object with the given object for equality.
|
||||
*
|
||||
* This object is considered "equal" to the other object if and only if
|
||||
* `other` performs the same aggregations as this `AggregateQuery` and
|
||||
* the underlying Query of `other` compares equal to that of this object
|
||||
* using `Query.isEqual()`.
|
||||
*
|
||||
* @param other The object to compare to this object for equality.
|
||||
* @return `true` if this object is "equal" to the given object, as
|
||||
* defined above, or `false` otherwise.
|
||||
*/
|
||||
isEqual(other) {
|
||||
if (this === other) {
|
||||
return true;
|
||||
}
|
||||
if (!(other instanceof AggregateQuery)) {
|
||||
return false;
|
||||
}
|
||||
if (!this.query.isEqual(other.query)) {
|
||||
return false;
|
||||
}
|
||||
return deepEqual(this._aggregates, other._aggregates);
|
||||
}
|
||||
/**
|
||||
* Plans and optionally executes this query. Returns a Promise that will be
|
||||
* resolved with the planner information, statistics from the query
|
||||
* execution (if any), and the query results (if any).
|
||||
*
|
||||
* @return A Promise that will be resolved with the planner information,
|
||||
* statistics from the query execution (if any), and the query results (if any).
|
||||
*/
|
||||
async explain(options) {
|
||||
const { result, explainMetrics } = await this._getResponse(undefined, options || {});
|
||||
if (!explainMetrics) {
|
||||
throw new Error('No explain results');
|
||||
}
|
||||
return new query_profile_1.ExplainResults(explainMetrics, result || null);
|
||||
}
|
||||
}
|
||||
exports.AggregateQuery = AggregateQuery;
|
||||
//# sourceMappingURL=aggregate-query.js.map
|
||||
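The constructor above shortens client aliases before they are sent over the wire. The sketch below mirrors that loop in isolation to show the mapping it produces; the long spec key is illustrative.
```
// Mirrors the alias-shortening loop in the AggregateQuery constructor.
function buildAliasMaps(aggregates: Record<string, unknown>) {
  const clientToServer: Record<string, string> = {};
  const serverToClient: Record<string, string> = {};
  let aggregationNum = 0;
  for (const clientAlias of Object.keys(aggregates)) {
    const serverAlias = `aggregate_${aggregationNum++}`;
    clientToServer[clientAlias] = serverAlias;
    serverToClient[serverAlias] = clientAlias;
  }
  return { clientToServer, serverToClient };
}

// A very long client alias still travels as the short 'aggregate_0'.
console.log(buildAliasMaps({ averagePopulationOfAllCitiesInTheDataset: null }));
```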
150
server/node_modules/@google-cloud/firestore/build/src/reference/collection-reference.d.ts
generated
vendored
Normal file
150
server/node_modules/@google-cloud/firestore/build/src/reference/collection-reference.d.ts
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
/**
|
||||
* Copyright 2024 Google LLC. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import * as firestore from '@google-cloud/firestore';
|
||||
import { ResourcePath } from '../path';
|
||||
import { Query } from './query';
|
||||
import Firestore from '../index';
|
||||
import { DocumentReference } from './document-reference';
|
||||
/**
|
||||
* A CollectionReference object can be used for adding documents, getting
|
||||
* document references, and querying for documents (using the methods
|
||||
* inherited from [Query]{@link Query}).
|
||||
*
|
||||
* @class CollectionReference
|
||||
* @extends Query
|
||||
*/
|
||||
export declare class CollectionReference<AppModelType = firestore.DocumentData, DbModelType extends firestore.DocumentData = firestore.DocumentData> extends Query<AppModelType, DbModelType> implements firestore.CollectionReference<AppModelType, DbModelType> {
|
||||
/**
|
||||
* @private
|
||||
*
|
||||
* @param firestore The Firestore Database client.
|
||||
* @param path The Path of this collection.
|
||||
*/
|
||||
constructor(firestore: Firestore, path: ResourcePath, converter?: firestore.FirestoreDataConverter<AppModelType, DbModelType>);
|
||||
/**
|
||||
* Returns a resource path for this collection.
|
||||
* @private
|
||||
* @internal
|
||||
*/
|
||||
get _resourcePath(): ResourcePath;
|
||||
/**
|
||||
* The last path element of the referenced collection.
|
||||
*
|
||||
* @type {string}
|
||||
* @name CollectionReference#id
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col/doc/subcollection');
|
||||
* console.log(`ID of the subcollection: ${collectionRef.id}`);
|
||||
* ```
|
||||
*/
|
||||
get id(): string;
|
||||
/**
|
||||
* A reference to the containing Document if this is a subcollection, else
|
||||
* null.
|
||||
*
|
||||
* @type {DocumentReference|null}
|
||||
* @name CollectionReference#parent
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col/doc/subcollection');
|
||||
* let documentRef = collectionRef.parent;
|
||||
* console.log(`Parent name: ${documentRef.path}`);
|
||||
* ```
|
||||
*/
|
||||
get parent(): DocumentReference | null;
|
||||
/**
|
||||
* A string representing the path of the referenced collection (relative
|
||||
* to the root of the database).
|
||||
*
|
||||
* @type {string}
|
||||
* @name CollectionReference#path
|
||||
* @readonly
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col/doc/subcollection');
|
||||
* console.log(`Path of the subcollection: ${collectionRef.path}`);
|
||||
* ```
|
||||
*/
|
||||
get path(): string;
|
||||
/**
|
||||
* Retrieves the list of documents in this collection.
|
||||
*
|
||||
* The document references returned may include references to "missing
|
||||
* documents", i.e. document locations that have no document present but
|
||||
* which contain subcollections with documents. Attempting to read such a
|
||||
* document reference (e.g. via `.get()` or `.onSnapshot()`) will return a
|
||||
* `DocumentSnapshot` whose `.exists` property is false.
|
||||
*
|
||||
* @return {Promise<DocumentReference[]>} The list of documents in this
|
||||
* collection.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
*
|
||||
* return collectionRef.listDocuments().then(documentRefs => {
|
||||
* return firestore.getAll(...documentRefs);
|
||||
* }).then(documentSnapshots => {
|
||||
* for (let documentSnapshot of documentSnapshots) {
|
||||
* if (documentSnapshot.exists) {
|
||||
* console.log(`Found document with data: ${documentSnapshot.id}`);
|
||||
* } else {
|
||||
* console.log(`Found missing document: ${documentSnapshot.id}`);
|
||||
* }
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
listDocuments(): Promise<Array<DocumentReference<AppModelType, DbModelType>>>;
|
||||
doc(): DocumentReference<AppModelType, DbModelType>;
|
||||
doc(documentPath: string): DocumentReference<AppModelType, DbModelType>;
|
||||
/**
|
||||
* Add a new document to this collection with the specified data, assigning
|
||||
* it a document ID automatically.
|
||||
*
|
||||
* @param {DocumentData} data An Object containing the data for the new
|
||||
* document.
|
||||
* @throws {Error} If the provided input is not a valid Firestore document.
|
||||
* @returns {Promise.<DocumentReference>} A Promise resolved with a
|
||||
* [DocumentReference]{@link DocumentReference} pointing to the
|
||||
* newly created document.
|
||||
*
|
||||
* @example
|
||||
* ```
|
||||
* let collectionRef = firestore.collection('col');
|
||||
* collectionRef.add({foo: 'bar'}).then(documentReference => {
|
||||
* console.log(`Added document with name: ${documentReference.id}`);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
add(data: firestore.WithFieldValue<AppModelType>): Promise<DocumentReference<AppModelType, DbModelType>>;
|
||||
/**
|
||||
* Returns true if this `CollectionReference` is equal to the provided value.
|
||||
*
|
||||
* @param {*} other The value to compare against.
|
||||
* @return {boolean} true if this `CollectionReference` is equal to the
|
||||
* provided value.
|
||||
*/
|
||||
isEqual(other: firestore.CollectionReference<AppModelType, DbModelType>): boolean;
|
||||
withConverter(converter: null): CollectionReference;
|
||||
withConverter<NewAppModelType, NewDbModelType extends firestore.DocumentData = firestore.DocumentData>(converter: firestore.FirestoreDataConverter<NewAppModelType, NewDbModelType>): CollectionReference<NewAppModelType, NewDbModelType>;
|
||||
}
|
||||
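The `withConverter()` overloads above carry no inline example. A hedged sketch of a typed collection, where the `City` shape, the converter, and the collection name are illustrative:
```
import { Firestore, FirestoreDataConverter } from '@google-cloud/firestore';

type City = { name: string; population: number };

const cityConverter: FirestoreDataConverter<City> = {
  // Pass the model through unchanged on writes...
  toFirestore: city => ({ ...city }),
  // ...and cast the stored fields back to the model on reads.
  fromFirestore: snapshot => snapshot.data() as City,
};

const firestore = new Firestore();

async function addCity(): Promise<void> {
  // add() now accepts a City, and reads return City instances.
  const cities = firestore.collection('cities').withConverter(cityConverter);
  await cities.add({ name: 'Tokyo', population: 37_400_000 });
}
```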
Some files were not shown because too many files have changed in this diff